mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-10 13:58:09 -05:00
Compare commits
321 Commits
processSlo
...
e2e-blockr
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2b0ba6ba97 | ||
|
|
66727a853f | ||
|
|
10fadd0ac3 | ||
|
|
b9ec6837ab | ||
|
|
0254d31810 | ||
|
|
25ce2ac4dc | ||
|
|
80e9042c6b | ||
|
|
6b7e8ac00e | ||
|
|
ca770f12ee | ||
|
|
74bebd9244 | ||
|
|
447b42044a | ||
|
|
6ec8d23d4f | ||
|
|
a41d80a03d | ||
|
|
849c1dd25b | ||
|
|
d430266b70 | ||
|
|
6d52516638 | ||
|
|
2ff3a82eac | ||
|
|
d403b68d76 | ||
|
|
b0768e39c3 | ||
|
|
cae57ee9f6 | ||
|
|
3104fd217d | ||
|
|
d1b9954566 | ||
|
|
033b165c08 | ||
|
|
78175ee0fd | ||
|
|
a34cc7f7bf | ||
|
|
6f22cd7963 | ||
|
|
58967e4516 | ||
|
|
947c9fbe60 | ||
|
|
8cc1e67e6c | ||
|
|
2f3deac8b0 | ||
|
|
aa44ba40ab | ||
|
|
8b268d646c | ||
|
|
d6ecadb471 | ||
|
|
9a8bde448e | ||
|
|
ce71b3b6b1 | ||
|
|
5fa30bf73a | ||
|
|
b93aba6126 | ||
|
|
51ed80df69 | ||
|
|
a614c4ac8c | ||
|
|
7b777a10a5 | ||
|
|
bf1ab9951f | ||
|
|
5cea2b3855 | ||
|
|
ab63757fe5 | ||
|
|
736ed1e003 | ||
|
|
6ee9707fd7 | ||
|
|
e40835b1a9 | ||
|
|
216a420bbc | ||
|
|
4845abecb8 | ||
|
|
08a910e44f | ||
|
|
67ba2c4fe3 | ||
|
|
7efa501bdc | ||
|
|
879a694ab3 | ||
|
|
4e3b1881ed | ||
|
|
35d3707de7 | ||
|
|
1f468bd3f5 | ||
|
|
ac32098c86 | ||
|
|
e0af005c42 | ||
|
|
f08af1bdbf | ||
|
|
6d0420fde5 | ||
|
|
5172e6e362 | ||
|
|
42df0f70b6 | ||
|
|
dace0f6a2d | ||
|
|
26a5878181 | ||
|
|
db6474a3e4 | ||
|
|
b84851fd0d | ||
|
|
673702c100 | ||
|
|
520eb6baca | ||
|
|
e6047dc344 | ||
|
|
d86a452b15 | ||
|
|
67f9d0b9c4 | ||
|
|
21cd055b84 | ||
|
|
9f3bb623ec | ||
|
|
b10a95097e | ||
|
|
4561f5cacb | ||
|
|
50b672a4db | ||
|
|
ffbb73a59b | ||
|
|
649974f14d | ||
|
|
9ec0bc0734 | ||
|
|
9649e49658 | ||
|
|
49fdcb7347 | ||
|
|
cd6ee956ed | ||
|
|
ef95fd33f8 | ||
|
|
1a488241b0 | ||
|
|
5fdd3a3d66 | ||
|
|
b6a32c050f | ||
|
|
055e225093 | ||
|
|
144218cb1b | ||
|
|
13b575a609 | ||
|
|
b5a414eae9 | ||
|
|
b94b347ace | ||
|
|
f5ee225819 | ||
|
|
9cb48be14f | ||
|
|
85fa9951eb | ||
|
|
ec72575fc9 | ||
|
|
d9d1bb6d3d | ||
|
|
ffcdc26618 | ||
|
|
96981a07b9 | ||
|
|
6b2721b239 | ||
|
|
c79151a574 | ||
|
|
4b20234801 | ||
|
|
911048aa6d | ||
|
|
255e9693ee | ||
|
|
61c1216e3d | ||
|
|
17e1eaf0f3 | ||
|
|
9940943595 | ||
|
|
9a0f941870 | ||
|
|
5d0f54d332 | ||
|
|
d602c94b7b | ||
|
|
6a5ecbd68f | ||
|
|
29dfcab505 | ||
|
|
16e5c903cc | ||
|
|
66682cb4e5 | ||
|
|
52faea8b7d | ||
|
|
8a78315682 | ||
|
|
cab42a4ae3 | ||
|
|
a5bdd42bdd | ||
|
|
a26197f919 | ||
|
|
8b9cab457e | ||
|
|
080ce31395 | ||
|
|
7866e8a196 | ||
|
|
d5d17e00b3 | ||
|
|
9c6a1331cf | ||
|
|
d89c97634c | ||
|
|
7e95ca3705 | ||
|
|
abd46b01b7 | ||
|
|
8629ac8417 | ||
|
|
304925aabf | ||
|
|
16d93e47a5 | ||
|
|
6dcb2bbf0d | ||
|
|
deb138959a | ||
|
|
45e6f3bd00 | ||
|
|
55a9e0d51a | ||
|
|
3ddae600fb | ||
|
|
861ede8945 | ||
|
|
93f11f9047 | ||
|
|
56503110dd | ||
|
|
f67d35dffd | ||
|
|
efbca1b5b7 | ||
|
|
2de0ebaf8d | ||
|
|
0815ef94a3 | ||
|
|
092ffa99e5 | ||
|
|
b05b67b264 | ||
|
|
a5c6518c20 | ||
|
|
da048395ce | ||
|
|
f31f7be310 | ||
|
|
e1a2267f86 | ||
|
|
3c9e4ee7f7 | ||
|
|
9ba32c9acd | ||
|
|
d23008452e | ||
|
|
f397cba1e0 | ||
|
|
3eecbb5b1a | ||
|
|
1583e93b48 | ||
|
|
849457df81 | ||
|
|
903cab75ee | ||
|
|
ee108d4aff | ||
|
|
49bcc58762 | ||
|
|
a08baf4a14 | ||
|
|
8c56dfdd46 | ||
|
|
dcdd9af9db | ||
|
|
a464cf5c60 | ||
|
|
cc55c754dc | ||
|
|
2d483ab09f | ||
|
|
d64e10a337 | ||
|
|
1e9ee10674 | ||
|
|
3ac395b39e | ||
|
|
6e26a6f128 | ||
|
|
b512b92a8a | ||
|
|
5ff601a1b9 | ||
|
|
5823054519 | ||
|
|
3d196662bc | ||
|
|
b0601580ef | ||
|
|
c1f29ea651 | ||
|
|
881d1d435a | ||
|
|
d1aae0c941 | ||
|
|
468cc23876 | ||
|
|
d9646a9183 | ||
|
|
279cee42f1 | ||
|
|
57bdb907cc | ||
|
|
15d683c78f | ||
|
|
bf6c8ced7d | ||
|
|
78fb685027 | ||
|
|
a87536eba0 | ||
|
|
3f05395a00 | ||
|
|
85fc57d41e | ||
|
|
1e5976d5ce | ||
|
|
98c0b23350 | ||
|
|
039a0fffba | ||
|
|
90ec640e7a | ||
|
|
10acd31d25 | ||
|
|
4224014fad | ||
|
|
df1e8b33d8 | ||
|
|
cdb4ee42cc | ||
|
|
d29baec77e | ||
|
|
53c189da9b | ||
|
|
277fbce61b | ||
|
|
0adc54b7ff | ||
|
|
1cbd7e9888 | ||
|
|
0a9e1658dd | ||
|
|
31d4a4cd11 | ||
|
|
fbc4e73d31 | ||
|
|
c1d4eaa79d | ||
|
|
760af6428e | ||
|
|
dfa0ccf626 | ||
|
|
7a142cf324 | ||
|
|
1a51fdbd58 | ||
|
|
368a99ec8d | ||
|
|
1c7e734918 | ||
|
|
764d1325bf | ||
|
|
0cf30e9022 | ||
|
|
227b20f368 | ||
|
|
d7d70bc25b | ||
|
|
82f6ddb693 | ||
|
|
9e4e82d2c5 | ||
|
|
9838369fe9 | ||
|
|
6085ad1bfa | ||
|
|
d3851b27df | ||
|
|
d6100dfdcb | ||
|
|
c2144dac86 | ||
|
|
a47ff569a8 | ||
|
|
f8be022ef2 | ||
|
|
4f39e6b685 | ||
|
|
c67b000633 | ||
|
|
02f7443586 | ||
|
|
6275e7df4e | ||
|
|
1b6b52fda1 | ||
|
|
5fa1fd84b9 | ||
|
|
bd0c9f9e8d | ||
|
|
2532bb370c | ||
|
|
12efc6c2c1 | ||
|
|
a6cc9ac9c5 | ||
|
|
031f5845a2 | ||
|
|
b88559726c | ||
|
|
ca6ddf4490 | ||
|
|
3ebb2fce94 | ||
|
|
62f6b07cba | ||
|
|
f956f1ed6e | ||
|
|
1c0fa95053 | ||
|
|
04bf4a1060 | ||
|
|
ae276fd371 | ||
|
|
104bdaed12 | ||
|
|
089a5d6ac2 | ||
|
|
16b0820193 | ||
|
|
4b02267e96 | ||
|
|
746584c453 | ||
|
|
b56daaaca2 | ||
|
|
b7a6fe88ee | ||
|
|
22d1c37b92 | ||
|
|
78a393f825 | ||
|
|
ac8290c1bf | ||
|
|
5d0662b415 | ||
|
|
931e5e10c3 | ||
|
|
c172f838b1 | ||
|
|
c07ae29cd9 | ||
|
|
214c9bfd8b | ||
|
|
716140d64d | ||
|
|
088cb4ef59 | ||
|
|
fa33e93a8e | ||
|
|
d1472fc351 | ||
|
|
5c8c0c31d8 | ||
|
|
7f3c00c7a2 | ||
|
|
c180dab791 | ||
|
|
f24acc21c7 | ||
|
|
40b637849d | ||
|
|
e7db1685df | ||
|
|
eccbfd1011 | ||
|
|
90211f6769 | ||
|
|
edc32ac18e | ||
|
|
fe68e020e3 | ||
|
|
81e1e3544d | ||
|
|
09372d5c35 | ||
|
|
078a89e4ca | ||
|
|
dbc6ae26a6 | ||
|
|
b6f429867a | ||
|
|
09f50660ce | ||
|
|
189825b495 | ||
|
|
441cad58d4 | ||
|
|
1277d08f9e | ||
|
|
e03de47db7 | ||
|
|
764b7ff610 | ||
|
|
307be7694e | ||
|
|
c76ae1ef39 | ||
|
|
d499db7f0e | ||
|
|
a894b9f29a | ||
|
|
902e6b3905 | ||
|
|
ed2d1c7bf9 | ||
|
|
14b73cbd47 | ||
|
|
a39c7aa864 | ||
|
|
170bc9c8ec | ||
|
|
365c01fc29 | ||
|
|
3124785a08 | ||
|
|
60e6306107 | ||
|
|
42ccb7830a | ||
|
|
0bb03b9292 | ||
|
|
ed6fbf1480 | ||
|
|
477cec6021 | ||
|
|
924500d111 | ||
|
|
0677504ef1 | ||
|
|
ca2a7c4d9c | ||
|
|
28606629ad | ||
|
|
c817279464 | ||
|
|
009d6ed8ed | ||
|
|
5cec1282a9 | ||
|
|
340170fd29 | ||
|
|
7ed0cc139a | ||
|
|
2c822213eb | ||
|
|
0894b9591c | ||
|
|
f0ca45f9a2 | ||
|
|
afc48c6485 | ||
|
|
93dce8a0cb | ||
|
|
149ccdaf39 | ||
|
|
c08bb39ffe | ||
|
|
5083d8ab34 | ||
|
|
7552a5dd07 | ||
|
|
c93d68f853 | ||
|
|
2b74db2dce | ||
|
|
cc6c91415d | ||
|
|
6d7d7e0adc | ||
|
|
2105d777f0 | ||
|
|
14338afbdb | ||
|
|
3e8aa4023d | ||
|
|
b443875e66 |
@@ -21,7 +21,7 @@ linters:
|
|||||||
linters-settings:
|
linters-settings:
|
||||||
gocognit:
|
gocognit:
|
||||||
# TODO: We should target for < 50
|
# TODO: We should target for < 50
|
||||||
min-complexity: 65
|
min-complexity: 69
|
||||||
|
|
||||||
output:
|
output:
|
||||||
print-issued-lines: true
|
print-issued-lines: true
|
||||||
|
|||||||
@@ -483,7 +483,7 @@ func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, e
|
|||||||
version := bytesutil.ToBytes4(vSlice)
|
version := bytesutil.ToBytes4(vSlice)
|
||||||
ofs = append(ofs, forks.ForkScheduleEntry{
|
ofs = append(ofs, forks.ForkScheduleEntry{
|
||||||
Version: version,
|
Version: version,
|
||||||
Epoch: primitives.Epoch(uint64(epoch)),
|
Epoch: primitives.Epoch(epoch),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
sort.Sort(ofs)
|
sort.Sort(ofs)
|
||||||
|
|||||||
@@ -301,7 +301,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
|||||||
|
|
||||||
var attr payloadattribute.Attributer
|
var attr payloadattribute.Attributer
|
||||||
switch st.Version() {
|
switch st.Version() {
|
||||||
case version.Capella:
|
case version.Capella, version.Deneb:
|
||||||
withdrawals, err := st.ExpectedWithdrawals()
|
withdrawals, err := st.ExpectedWithdrawals()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
|
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
|
||||||
|
|||||||
@@ -60,7 +60,13 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
|||||||
log = log.WithField("txCount", len(txs))
|
log = log.WithField("txCount", len(txs))
|
||||||
txsPerSlotCount.Set(float64(len(txs)))
|
txsPerSlotCount.Set(float64(len(txs)))
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
if b.Version() >= version.Deneb {
|
||||||
|
k, err := b.Body().BlobKzgCommitments()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log = log.WithField("blobCount", len(k))
|
||||||
}
|
}
|
||||||
log.Info("Finished applying state transition")
|
log.Info("Finished applying state transition")
|
||||||
return nil
|
return nil
|
||||||
@@ -96,6 +102,7 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
|
|||||||
"finalizedEpoch": finalized.Epoch,
|
"finalizedEpoch": finalized.Epoch,
|
||||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||||
"epoch": slots.ToEpoch(block.Slot()),
|
"epoch": slots.ToEpoch(block.Slot()),
|
||||||
|
"version": version.String(block.Version()),
|
||||||
}).Info("Synced new block")
|
}).Info("Synced new block")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -57,6 +57,11 @@ type mockBroadcaster struct {
|
|||||||
broadcastCalled bool
|
broadcastCalled bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (mb *mockBroadcaster) BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error {
|
||||||
|
//TODO implement me
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
|
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
|
||||||
mb.broadcastCalled = true
|
mb.broadcastCalled = true
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -9,6 +9,110 @@ import (
|
|||||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// UpgradeToDeneb updates inputs a generic state to return the version Deneb state.
|
||||||
|
func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
|
||||||
|
epoch := time.CurrentEpoch(state)
|
||||||
|
|
||||||
|
currentSyncCommittee, err := state.CurrentSyncCommittee()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
nextSyncCommittee, err := state.NextSyncCommittee()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
prevEpochParticipation, err := state.PreviousEpochParticipation()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
currentEpochParticipation, err := state.CurrentEpochParticipation()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
inactivityScores, err := state.InactivityScores()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
payloadHeader, err := state.LatestExecutionPayloadHeader()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
txRoot, err := payloadHeader.TransactionsRoot()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
wdRoot, err := payloadHeader.WithdrawalsRoot()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
wi, err := state.NextWithdrawalIndex()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
vi, err := state.NextWithdrawalValidatorIndex()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
summarires, err := state.HistoricalSummaries()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
s := ðpb.BeaconStateDeneb{
|
||||||
|
GenesisTime: state.GenesisTime(),
|
||||||
|
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
|
||||||
|
Slot: state.Slot(),
|
||||||
|
Fork: ðpb.Fork{
|
||||||
|
PreviousVersion: state.Fork().CurrentVersion,
|
||||||
|
CurrentVersion: params.BeaconConfig().DenebForkVersion,
|
||||||
|
Epoch: epoch,
|
||||||
|
},
|
||||||
|
LatestBlockHeader: state.LatestBlockHeader(),
|
||||||
|
BlockRoots: state.BlockRoots(),
|
||||||
|
StateRoots: state.StateRoots(),
|
||||||
|
HistoricalRoots: [][]byte{},
|
||||||
|
Eth1Data: state.Eth1Data(),
|
||||||
|
Eth1DataVotes: state.Eth1DataVotes(),
|
||||||
|
Eth1DepositIndex: state.Eth1DepositIndex(),
|
||||||
|
Validators: state.Validators(),
|
||||||
|
Balances: state.Balances(),
|
||||||
|
RandaoMixes: state.RandaoMixes(),
|
||||||
|
Slashings: state.Slashings(),
|
||||||
|
PreviousEpochParticipation: prevEpochParticipation,
|
||||||
|
CurrentEpochParticipation: currentEpochParticipation,
|
||||||
|
JustificationBits: state.JustificationBits(),
|
||||||
|
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
|
||||||
|
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
|
||||||
|
FinalizedCheckpoint: state.FinalizedCheckpoint(),
|
||||||
|
InactivityScores: inactivityScores,
|
||||||
|
CurrentSyncCommittee: currentSyncCommittee,
|
||||||
|
NextSyncCommittee: nextSyncCommittee,
|
||||||
|
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
|
||||||
|
ParentHash: payloadHeader.ParentHash(),
|
||||||
|
FeeRecipient: payloadHeader.FeeRecipient(),
|
||||||
|
StateRoot: payloadHeader.StateRoot(),
|
||||||
|
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
|
||||||
|
LogsBloom: payloadHeader.LogsBloom(),
|
||||||
|
PrevRandao: payloadHeader.PrevRandao(),
|
||||||
|
BlockNumber: payloadHeader.BlockNumber(),
|
||||||
|
GasLimit: payloadHeader.GasLimit(),
|
||||||
|
GasUsed: payloadHeader.GasUsed(),
|
||||||
|
Timestamp: payloadHeader.Timestamp(),
|
||||||
|
ExtraData: payloadHeader.ExtraData(),
|
||||||
|
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
|
||||||
|
BlockHash: payloadHeader.BlockHash(),
|
||||||
|
ExcessDataGas: make([]byte, 32),
|
||||||
|
TransactionsRoot: txRoot,
|
||||||
|
WithdrawalsRoot: wdRoot,
|
||||||
|
},
|
||||||
|
NextWithdrawalIndex: wi,
|
||||||
|
NextWithdrawalValidatorIndex: vi,
|
||||||
|
HistoricalSummaries: summarires,
|
||||||
|
}
|
||||||
|
|
||||||
|
return state_native.InitializeFromProtoUnsafeDeneb(s)
|
||||||
|
}
|
||||||
|
|
||||||
// UpgradeToCapella updates a generic state to return the version Capella state.
|
// UpgradeToCapella updates a generic state to return the version Capella state.
|
||||||
func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
|
func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
|
||||||
epoch := time.CurrentEpoch(state)
|
epoch := time.CurrentEpoch(state)
|
||||||
|
|||||||
@@ -353,7 +353,7 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ProcessHistoricalDataUpdate processes the updates to historical data during epoch processing.
|
// ProcessHistoricalDataUpdate processes the updates to historical data during epoch processing.
|
||||||
// From Capella onward, per spec,state's historical summaries are updated instead of historical roots.
|
// From Capella onward, per spec, state's historical summaries are updated instead of historical roots.
|
||||||
func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, error) {
|
func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, error) {
|
||||||
currentEpoch := time.CurrentEpoch(state)
|
currentEpoch := time.CurrentEpoch(state)
|
||||||
nextEpoch := currentEpoch + 1
|
nextEpoch := currentEpoch + 1
|
||||||
|
|||||||
@@ -81,6 +81,15 @@ func CanUpgradeToCapella(slot primitives.Slot) bool {
|
|||||||
return epochStart && capellaEpoch
|
return epochStart && capellaEpoch
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CanUpgradeToDeneb returns true if the input `slot` can upgrade to Deneb.
|
||||||
|
// Spec code:
|
||||||
|
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == DENEB_FORK_EPOCH
|
||||||
|
func CanUpgradeToDeneb(slot primitives.Slot) bool {
|
||||||
|
epochStart := slots.IsEpochStart(slot)
|
||||||
|
DenebEpoch := slots.ToEpoch(slot) == params.BeaconConfig().DenebForkEpoch
|
||||||
|
return epochStart && DenebEpoch
|
||||||
|
}
|
||||||
|
|
||||||
// CanProcessEpoch checks the eligibility to process epoch.
|
// CanProcessEpoch checks the eligibility to process epoch.
|
||||||
// The epoch can be processed at the end of the last slot of every epoch.
|
// The epoch can be processed at the end of the last slot of every epoch.
|
||||||
//
|
//
|
||||||
|
|||||||
@@ -23,7 +23,6 @@ go_library(
|
|||||||
"//beacon-chain/core/execution:go_default_library",
|
"//beacon-chain/core/execution:go_default_library",
|
||||||
"//beacon-chain/core/helpers:go_default_library",
|
"//beacon-chain/core/helpers:go_default_library",
|
||||||
"//beacon-chain/core/time:go_default_library",
|
"//beacon-chain/core/time:go_default_library",
|
||||||
"//beacon-chain/core/transition/interop:go_default_library",
|
|
||||||
"//beacon-chain/core/validators:go_default_library",
|
"//beacon-chain/core/validators:go_default_library",
|
||||||
"//beacon-chain/state:go_default_library",
|
"//beacon-chain/state:go_default_library",
|
||||||
"//beacon-chain/state/state-native:go_default_library",
|
"//beacon-chain/state/state-native:go_default_library",
|
||||||
@@ -46,6 +45,7 @@ go_library(
|
|||||||
"@com_github_pkg_errors//:go_default_library",
|
"@com_github_pkg_errors//:go_default_library",
|
||||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||||
|
"@com_github_protolambda_go_kzg//eth:go_default_library",
|
||||||
"@com_github_sirupsen_logrus//:go_default_library",
|
"@com_github_sirupsen_logrus//:go_default_library",
|
||||||
"@io_opencensus_go//trace:go_default_library",
|
"@io_opencensus_go//trace:go_default_library",
|
||||||
],
|
],
|
||||||
|
|||||||
@@ -302,6 +302,14 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if time.CanUpgradeToDeneb(state.Slot()) {
|
||||||
|
state, err = capella.UpgradeToDeneb(state)
|
||||||
|
if err != nil {
|
||||||
|
tracing.AnnotateError(span, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if highestSlot < state.Slot() {
|
if highestSlot < state.Slot() {
|
||||||
|
|||||||
@@ -6,16 +6,18 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/protolambda/go-kzg/eth"
|
||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
|
||||||
b "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
b "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition/interop"
|
|
||||||
v "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/validators"
|
v "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/validators"
|
||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
|
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
|
||||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||||
|
|
||||||
"go.opencensus.io/trace"
|
"go.opencensus.io/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -57,9 +59,6 @@ func ExecuteStateTransitionNoVerifyAnySig(
|
|||||||
defer span.End()
|
defer span.End()
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
interop.WriteBlockToDisk(signed, false /* Has the block failed */)
|
|
||||||
interop.WriteStateToDisk(st)
|
|
||||||
|
|
||||||
parentRoot := signed.Block().ParentRoot()
|
parentRoot := signed.Block().ParentRoot()
|
||||||
st, err = ProcessSlotsUsingNextSlotCache(ctx, st, parentRoot[:], signed.Block().Slot())
|
st, err = ProcessSlotsUsingNextSlotCache(ctx, st, parentRoot[:], signed.Block().Slot())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -256,7 +255,7 @@ func ProcessOperationsNoVerifyAttsSigs(
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
case version.Altair, version.Bellatrix, version.Capella:
|
case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
|
||||||
state, err = altairOperations(ctx, state, signedBeaconBlock)
|
state, err = altairOperations(ctx, state, signedBeaconBlock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -356,9 +355,48 @@ func ProcessBlockForStateRoot(
|
|||||||
return nil, errors.Wrap(err, "process_sync_aggregate failed")
|
return nil, errors.Wrap(err, "process_sync_aggregate failed")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if signed.Block().Version() == version.Deneb {
|
||||||
|
err := ValidateBlobKzgs(ctx, signed.Block().Body())
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "could not validate blob kzgs")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return state, nil
|
return state, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ValidateBlobKzgs validates the blob kzgs in the beacon block.
|
||||||
|
//
|
||||||
|
// Spec code:
|
||||||
|
// def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody):
|
||||||
|
//
|
||||||
|
// assert verify_kzg_commitments_against_transactions(body.execution_payload.transactions, body.blob_kzg_commitments)
|
||||||
|
func ValidateBlobKzgs(ctx context.Context, body interfaces.ReadOnlyBeaconBlockBody) error {
|
||||||
|
_, span := trace.StartSpan(ctx, "core.state.ValidateBlobKzgs")
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
payload, err := body.Execution()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "could not get execution payload from block")
|
||||||
|
}
|
||||||
|
blkKzgs, err := body.BlobKzgCommitments()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "could not get blob kzg commitments from block")
|
||||||
|
}
|
||||||
|
kzgs := make(eth.KZGCommitmentSequenceImpl, len(blkKzgs))
|
||||||
|
for i := range blkKzgs {
|
||||||
|
kzgs[i] = bytesutil.ToBytes48(blkKzgs[i])
|
||||||
|
}
|
||||||
|
txs, err := payload.Transactions()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "could not get transactions from payload")
|
||||||
|
}
|
||||||
|
if err := eth.VerifyKZGCommitmentsAgainstTransactions(txs, kzgs); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// This calls altair block operations.
|
// This calls altair block operations.
|
||||||
func altairOperations(
|
func altairOperations(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
|
|||||||
@@ -54,6 +54,11 @@ type ReadOnlyDatabase interface {
|
|||||||
// Fee recipients operations.
|
// Fee recipients operations.
|
||||||
FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
|
FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
|
||||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||||
|
|
||||||
|
// Blob operations.
|
||||||
|
BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
|
||||||
|
BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error)
|
||||||
|
|
||||||
// origin checkpoint sync support
|
// origin checkpoint sync support
|
||||||
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||||
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
|
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
|
||||||
@@ -89,6 +94,10 @@ type NoHeadAccessDatabase interface {
|
|||||||
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, addrs []common.Address) error
|
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, addrs []common.Address) error
|
||||||
SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
|
SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
|
||||||
|
|
||||||
|
// Blob operations.
|
||||||
|
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
|
||||||
|
DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error
|
||||||
|
|
||||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ go_library(
|
|||||||
srcs = [
|
srcs = [
|
||||||
"archived_point.go",
|
"archived_point.go",
|
||||||
"backup.go",
|
"backup.go",
|
||||||
|
"blob.go",
|
||||||
"blocks.go",
|
"blocks.go",
|
||||||
"checkpoint.go",
|
"checkpoint.go",
|
||||||
"deposit_contract.go",
|
"deposit_contract.go",
|
||||||
@@ -74,6 +75,7 @@ go_test(
|
|||||||
srcs = [
|
srcs = [
|
||||||
"archived_point_test.go",
|
"archived_point_test.go",
|
||||||
"backup_test.go",
|
"backup_test.go",
|
||||||
|
"blob_test.go",
|
||||||
"blocks_test.go",
|
"blocks_test.go",
|
||||||
"checkpoint_test.go",
|
"checkpoint_test.go",
|
||||||
"deposit_contract_test.go",
|
"deposit_contract_test.go",
|
||||||
@@ -107,9 +109,11 @@ go_test(
|
|||||||
"//consensus-types/interfaces:go_default_library",
|
"//consensus-types/interfaces:go_default_library",
|
||||||
"//consensus-types/primitives:go_default_library",
|
"//consensus-types/primitives:go_default_library",
|
||||||
"//encoding/bytesutil:go_default_library",
|
"//encoding/bytesutil:go_default_library",
|
||||||
|
"//proto/engine/v1:go_default_library",
|
||||||
"//proto/prysm/v1alpha1:go_default_library",
|
"//proto/prysm/v1alpha1:go_default_library",
|
||||||
"//proto/testing:go_default_library",
|
"//proto/testing:go_default_library",
|
||||||
"//testing/assert:go_default_library",
|
"//testing/assert:go_default_library",
|
||||||
|
"//testing/assertions:go_default_library",
|
||||||
"//testing/require:go_default_library",
|
"//testing/require:go_default_library",
|
||||||
"//testing/util:go_default_library",
|
"//testing/util:go_default_library",
|
||||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||||
|
|||||||
182
beacon-chain/db/kv/blob.go
Normal file
182
beacon-chain/db/kv/blob.go
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
package kv
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
|
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
|
bolt "go.etcd.io/bbolt"
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SaveBlobSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob:
|
||||||
|
//
|
||||||
|
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_BLOB_EPOCHS*SLOTS_PER_EPOCH
|
||||||
|
//
|
||||||
|
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
|
||||||
|
//
|
||||||
|
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
|
||||||
|
// in the rotating keys buffer, we overwrite all elements for that slot.
|
||||||
|
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) error {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobSidecar")
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
if len(scs) == 0 {
|
||||||
|
return errors.New("nil or empty blob sidecars")
|
||||||
|
}
|
||||||
|
slot := scs[0].Slot
|
||||||
|
|
||||||
|
return s.db.Update(func(tx *bolt.Tx) error {
|
||||||
|
encodedBlobSidecar, err := encode(ctx, ðpb.BlobSidecars{Sidecars: scs})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
bkt := tx.Bucket(blobsBucket)
|
||||||
|
c := bkt.Cursor()
|
||||||
|
newKey := blobSidecarKey(scs[0])
|
||||||
|
rotatingBufferPrefix := newKey[0:8]
|
||||||
|
var replacingKey []byte
|
||||||
|
for k, _ := c.Seek(rotatingBufferPrefix); bytes.HasPrefix(k, rotatingBufferPrefix); k, _ = c.Next() {
|
||||||
|
if len(k) != 0 {
|
||||||
|
replacingKey = k
|
||||||
|
oldSlotBytes := replacingKey[8:16]
|
||||||
|
oldSlot := bytesutil.BytesToSlotBigEndian(oldSlotBytes)
|
||||||
|
if oldSlot >= slot {
|
||||||
|
return fmt.Errorf("attempted to save blob with slot %d but already have older blob with slot %d", slot, oldSlot)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// If there is no element stored at blob.slot % MAX_SLOTS_TO_PERSIST_BLOBS, then we simply
|
||||||
|
// store the blob by key and exit early.
|
||||||
|
if len(replacingKey) == 0 {
|
||||||
|
return bkt.Put(newKey, encodedBlobSidecar)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := bkt.Delete(replacingKey); err != nil {
|
||||||
|
log.WithError(err).Warnf("Could not delete blob with key %#x", replacingKey)
|
||||||
|
}
|
||||||
|
return bkt.Put(newKey, encodedBlobSidecar)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlobSidecarsByRoot retrieves the blobs for the given beacon block root.
|
||||||
|
// If the `indices` argument is omitted, all blobs for the root will be returned.
|
||||||
|
// Otherwise, the result will be filtered to only include the specified indices.
|
||||||
|
// An error will result if an invalid index is specified.
|
||||||
|
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsByRoot")
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
var enc []byte
|
||||||
|
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||||
|
c := tx.Bucket(blobsBucket).Cursor()
|
||||||
|
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
|
||||||
|
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||||
|
if bytes.HasSuffix(k, root[:]) {
|
||||||
|
enc = v
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if enc == nil {
|
||||||
|
return nil, ErrNotFound
|
||||||
|
}
|
||||||
|
sc := ðpb.BlobSidecars{}
|
||||||
|
if err := decode(ctx, enc, sc); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return filterForIndices(sc, indices...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||||
|
if len(indices) == 0 {
|
||||||
|
return sc.Sidecars, nil
|
||||||
|
}
|
||||||
|
// NB: This loop assumes that the BlobSidecars value stores the complete set of blobs for a block
|
||||||
|
// in ascending order from eg 0..3, without gaps. This allows us to assume the indices argument
|
||||||
|
// maps 1:1 with indices in the BlobSidecars storage object.
|
||||||
|
maxIdx := uint64(len(sc.Sidecars)) - 1
|
||||||
|
sidecars := make([]*ethpb.BlobSidecar, len(indices))
|
||||||
|
for i, idx := range indices {
|
||||||
|
if idx > maxIdx {
|
||||||
|
return nil, errors.Wrapf(ErrNotFound, "BlobSidecars missing index: index %d", idx)
|
||||||
|
}
|
||||||
|
sidecars[i] = sc.Sidecars[idx]
|
||||||
|
}
|
||||||
|
return sidecars, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlobSidecarsBySlot retrieves BlobSidecars for the given slot.
|
||||||
|
// If the `indices` argument is omitted, all blobs for the root will be returned.
|
||||||
|
// Otherwise, the result will be filtered to only include the specified indices.
|
||||||
|
// An error will result if an invalid index is specified.
|
||||||
|
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsBySlot")
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
var enc []byte
|
||||||
|
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||||
|
c := tx.Bucket(blobsBucket).Cursor()
|
||||||
|
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
|
||||||
|
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||||
|
slotInKey := bytesutil.BytesToSlotBigEndian(k[8:16])
|
||||||
|
if slotInKey == slot {
|
||||||
|
enc = v
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if enc == nil {
|
||||||
|
return nil, ErrNotFound
|
||||||
|
}
|
||||||
|
sc := ðpb.BlobSidecars{}
|
||||||
|
if err := decode(ctx, enc, sc); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return filterForIndices(sc, indices...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteBlobSidecar returns true if the blobs are in the db.
|
||||||
|
func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar")
|
||||||
|
defer span.End()
|
||||||
|
return s.db.Update(func(tx *bolt.Tx) error {
|
||||||
|
bkt := tx.Bucket(blobsBucket)
|
||||||
|
c := bkt.Cursor()
|
||||||
|
for k, _ := c.First(); k != nil; k, _ = c.Next() {
|
||||||
|
if bytes.HasSuffix(k, beaconBlockRoot[:]) {
|
||||||
|
if err := bkt.Delete(k); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
|
||||||
|
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
|
||||||
|
func blobSidecarKey(blob *ethpb.BlobSidecar) []byte {
|
||||||
|
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||||
|
maxEpochsToPersistBlobs := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
|
||||||
|
maxSlotsToPersistBlobs := types.Slot(maxEpochsToPersistBlobs.Mul(uint64(slotsPerEpoch)))
|
||||||
|
slotInRotatingBuffer := blob.Slot.ModSlot(maxSlotsToPersistBlobs)
|
||||||
|
key := bytesutil.SlotToBytesBigEndian(slotInRotatingBuffer)
|
||||||
|
key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
|
||||||
|
key = append(key, blob.BlockRoot...)
|
||||||
|
return key
|
||||||
|
}
|
||||||
239
beacon-chain/db/kv/blob_test.go
Normal file
239
beacon-chain/db/kv/blob_test.go
Normal file
@@ -0,0 +1,239 @@
|
|||||||
|
package kv
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
|
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||||
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/assertions"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func equalBlobSlices(expect []*ethpb.BlobSidecar, got []*ethpb.BlobSidecar) error {
|
||||||
|
if len(expect) != len(got) {
|
||||||
|
return fmt.Errorf("mismatched lengths, expect=%d, got=%d", len(expect), len(got))
|
||||||
|
}
|
||||||
|
for i := 0; i < len(expect); i++ {
|
||||||
|
es := expect[i]
|
||||||
|
gs := got[i]
|
||||||
|
var e string
|
||||||
|
assertions.DeepEqual(assertions.SprintfAssertionLoggerFn(&e), es, gs)
|
||||||
|
if e != "" {
|
||||||
|
return errors.New(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStore_BlobSidecars exercises the blob sidecar storage lifecycle:
// save validation, retrieval by root and by slot (full set and filtered
// subsets), invalid-index errors, deletion, and rotating-buffer replacement.
func TestStore_BlobSidecars(t *testing.T) {
	ctx := context.Background()

	// Saving an empty slice must be rejected.
	t.Run("empty", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, 0)
		require.ErrorContains(t, "nil or empty blob sidecars", db.SaveBlobSidecar(ctx, scs))
	})
	// Lookups against an empty DB surface ErrNotFound.
	t.Run("empty by root", func(t *testing.T) {
		db := setupDB(t)
		got, err := db.BlobSidecarsByRoot(ctx, [32]byte{})
		// NOTE(review): args look swapped vs the require.ErrorIs(t, err, target)
		// order used elsewhere in this test; it still passes because both sides
		// are the same sentinel — consider normalizing.
		require.ErrorIs(t, ErrNotFound, err)
		require.Equal(t, 0, len(got))
	})
	t.Run("empty by slot", func(t *testing.T) {
		db := setupDB(t)
		got, err := db.BlobSidecarsBySlot(ctx, 1)
		require.ErrorIs(t, ErrNotFound, err)
		require.Equal(t, 0, len(got))
	})
	// Round-trip a single sidecar keyed by its block root.
	t.Run("save and retrieve by root (one)", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, 1)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, 1, len(scs))
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
	})
	// Round-trip the maximum number of sidecars allowed per block.
	t.Run("save and retrieve by root (max)", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
	})
	// Index filtering: requesting a subset returns exactly those entries, in order.
	t.Run("save and retrieve valid subset by root", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))

		// we'll request indices 0 and 3, so make a slice with those indices for comparison
		expect := make([]*ethpb.BlobSidecar, 2)
		expect[0] = scs[0]
		expect[1] = scs[3]

		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), 0, 3)
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(expect, got))
		require.Equal(t, uint64(0), got[0].Index)
		require.Equal(t, uint64(3), got[1].Index)
	})
	// An index one past the stored range must map to ErrNotFound.
	t.Run("error for invalid index when retrieving by root", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))

		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), uint64(len(scs)))
		require.ErrorIs(t, err, ErrNotFound)
		require.Equal(t, 0, len(got))
	})
	// Same retrieval scenarios as above, but keyed by slot instead of root.
	t.Run("save and retrieve by slot (one)", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, 1)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, 1, len(scs))
		got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
	})
	t.Run("save and retrieve by slot (max)", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
		got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
	})
	t.Run("save and retrieve valid subset by slot", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))

		// we'll request indices 0 and 3, so make a slice with those indices for comparison
		expect := make([]*ethpb.BlobSidecar, 2)
		expect[0] = scs[0]
		expect[1] = scs[3]

		got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, 0, 3)
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(expect, got))

		require.Equal(t, uint64(0), got[0].Index)
		require.Equal(t, uint64(3), got[1].Index)
	})
	t.Run("error for invalid index when retrieving by slot", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))

		got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, uint64(len(scs)))
		require.ErrorIs(t, err, ErrNotFound)
		require.Equal(t, 0, len(got))
	})
	// After deletion, lookups by the same root must return ErrNotFound.
	t.Run("delete works", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
		require.NoError(t, db.DeleteBlobSidecar(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
		got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
		require.ErrorIs(t, ErrNotFound, err)
		require.Equal(t, 0, len(got))
	})
	// Re-saving the same slot is rejected rather than silently overwritten.
	t.Run("saving a blob with older slot", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))
		require.ErrorContains(t, "but already have older blob with slot", db.SaveBlobSidecar(ctx, scs))
	})
	// A blob exactly one retention window later lands in the same rotating-buffer
	// bucket and must evict the older entry.
	t.Run("saving a new blob for rotation", func(t *testing.T) {
		db := setupDB(t)
		scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		require.NoError(t, db.SaveBlobSidecar(ctx, scs))
		require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
		oldBlockRoot := scs[0].BlockRoot
		got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(scs, got))

		newScs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
		newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
		newScs[0].Slot = scs[0].Slot + newRetentionSlot
		require.NoError(t, db.SaveBlobSidecar(ctx, newScs))

		// The rotated-out entry is gone...
		_, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
		require.ErrorIs(t, ErrNotFound, err)

		// ...and the replacement is retrievable.
		got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(newScs[0].BlockRoot))
		require.NoError(t, err)
		require.NoError(t, equalBlobSlices(newScs, got))
	})
}
|
||||||
|
|
||||||
|
func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
|
||||||
|
blobSidecars := make([]*ethpb.BlobSidecar, n)
|
||||||
|
for i := uint64(0); i < n; i++ {
|
||||||
|
blobSidecars[i] = generateBlobSidecar(t, i)
|
||||||
|
}
|
||||||
|
return blobSidecars
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
|
||||||
|
blockRoot := make([]byte, 32)
|
||||||
|
_, err := rand.Read(blockRoot)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, err)
|
||||||
|
slot := make([]byte, 8)
|
||||||
|
_, err = rand.Read(slot)
|
||||||
|
require.NoError(t, err)
|
||||||
|
blockParentRoot := make([]byte, 32)
|
||||||
|
_, err = rand.Read(blockParentRoot)
|
||||||
|
require.NoError(t, err)
|
||||||
|
proposerIndex := make([]byte, 8)
|
||||||
|
_, err = rand.Read(proposerIndex)
|
||||||
|
require.NoError(t, err)
|
||||||
|
blobData := make([]byte, 131072)
|
||||||
|
_, err = rand.Read(blobData)
|
||||||
|
require.NoError(t, err)
|
||||||
|
blob := &enginev1.Blob{
|
||||||
|
Data: blobData,
|
||||||
|
}
|
||||||
|
kzgCommitment := make([]byte, 48)
|
||||||
|
_, err = rand.Read(kzgCommitment)
|
||||||
|
require.NoError(t, err)
|
||||||
|
kzgProof := make([]byte, 48)
|
||||||
|
_, err = rand.Read(kzgProof)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
return ðpb.BlobSidecar{
|
||||||
|
BlockRoot: blockRoot,
|
||||||
|
Index: index,
|
||||||
|
Slot: primitives.Slot(binary.LittleEndian.Uint64(slot)),
|
||||||
|
BlockParentRoot: blockParentRoot,
|
||||||
|
ProposerIndex: primitives.ValidatorIndex(binary.LittleEndian.Uint64(proposerIndex)),
|
||||||
|
Blob: blob,
|
||||||
|
KzgCommitment: kzgCommitment,
|
||||||
|
KzgProof: kzgProof,
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -818,6 +818,16 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
|
|||||||
if err := rawBlock.UnmarshalSSZ(enc[len(capellaBlindKey):]); err != nil {
|
if err := rawBlock.UnmarshalSSZ(enc[len(capellaBlindKey):]); err != nil {
|
||||||
return nil, errors.Wrap(err, "could not unmarshal blinded Capella block")
|
return nil, errors.Wrap(err, "could not unmarshal blinded Capella block")
|
||||||
}
|
}
|
||||||
|
case hasDenebKey(enc):
|
||||||
|
rawBlock = ðpb.SignedBeaconBlockDeneb{}
|
||||||
|
if err := rawBlock.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
case hasDenebBlindKey(enc):
|
||||||
|
rawBlock = ðpb.SignedBlindedBeaconBlockDeneb{}
|
||||||
|
if err := rawBlock.UnmarshalSSZ(enc[len(denebBlindKey):]); err != nil {
|
||||||
|
return nil, errors.Wrap(err, "could not unmarshal blinded Deneb block")
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
// Marshal block bytes to phase 0 beacon block.
|
// Marshal block bytes to phase 0 beacon block.
|
||||||
rawBlock = ðpb.SignedBeaconBlock{}
|
rawBlock = ðpb.SignedBeaconBlock{}
|
||||||
@@ -854,6 +864,8 @@ func marshalBlockFull(
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
switch blk.Version() {
|
switch blk.Version() {
|
||||||
|
case version.Deneb:
|
||||||
|
return snappy.Encode(nil, append(denebKey, encodedBlock...)), nil
|
||||||
case version.Capella:
|
case version.Capella:
|
||||||
return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
|
return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
|
||||||
case version.Bellatrix:
|
case version.Bellatrix:
|
||||||
@@ -888,6 +900,8 @@ func marshalBlockBlinded(
|
|||||||
return nil, errors.Wrap(err, "could not marshal blinded block")
|
return nil, errors.Wrap(err, "could not marshal blinded block")
|
||||||
}
|
}
|
||||||
switch blk.Version() {
|
switch blk.Version() {
|
||||||
|
case version.Deneb:
|
||||||
|
return snappy.Encode(nil, append(denebBlindKey, encodedBlock...)), nil
|
||||||
case version.Capella:
|
case version.Capella:
|
||||||
return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
|
return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
|
||||||
case version.Bellatrix:
|
case version.Bellatrix:
|
||||||
|
|||||||
@@ -37,3 +37,17 @@ func hasCapellaBlindKey(enc []byte) bool {
|
|||||||
}
|
}
|
||||||
return bytes.Equal(enc[:len(capellaBlindKey)], capellaBlindKey)
|
return bytes.Equal(enc[:len(capellaBlindKey)], capellaBlindKey)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func hasDenebKey(enc []byte) bool {
|
||||||
|
if len(denebKey) >= len(enc) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return bytes.Equal(enc[:len(denebKey)], denebKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasDenebBlindKey(enc []byte) bool {
|
||||||
|
if len(denebBlindKey) >= len(enc) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return bytes.Equal(enc[:len(denebBlindKey)], denebBlindKey)
|
||||||
|
}
|
||||||
|
|||||||
@@ -129,6 +129,8 @@ var Buckets = [][]byte{
|
|||||||
|
|
||||||
feeRecipientBucket,
|
feeRecipientBucket,
|
||||||
registrationBucket,
|
registrationBucket,
|
||||||
|
|
||||||
|
blobsBucket,
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewKVStore initializes a new boltDB key-value store at the directory
|
// NewKVStore initializes a new boltDB key-value store at the directory
|
||||||
|
|||||||
@@ -46,6 +46,7 @@ var (
|
|||||||
finalizedCheckpointKey = []byte("finalized-checkpoint")
|
finalizedCheckpointKey = []byte("finalized-checkpoint")
|
||||||
powchainDataKey = []byte("powchain-data")
|
powchainDataKey = []byte("powchain-data")
|
||||||
lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
|
lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
|
||||||
|
blobsBucket = []byte("blobs")
|
||||||
|
|
||||||
// Below keys are used to identify objects are to be fork compatible.
|
// Below keys are used to identify objects are to be fork compatible.
|
||||||
// Objects that are only compatible with specific forks should be prefixed with such keys.
|
// Objects that are only compatible with specific forks should be prefixed with such keys.
|
||||||
@@ -55,6 +56,9 @@ var (
|
|||||||
capellaKey = []byte("capella")
|
capellaKey = []byte("capella")
|
||||||
capellaBlindKey = []byte("blind-capella")
|
capellaBlindKey = []byte("blind-capella")
|
||||||
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
|
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
|
||||||
|
denebKey = []byte("deneb")
|
||||||
|
denebBlindKey = []byte("blind-deneb")
|
||||||
|
|
||||||
// block root included in the beacon state used by weak subjectivity initial sync
|
// block root included in the beacon state used by weak subjectivity initial sync
|
||||||
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
|
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
|
||||||
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
|
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
|
||||||
|
|||||||
@@ -473,6 +473,19 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
|
|||||||
}
|
}
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
|
case hasDenebKey(enc):
|
||||||
|
protoState := ðpb.BeaconStateDeneb{}
|
||||||
|
if err := protoState.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to unmarshal encoding for Deneb")
|
||||||
|
}
|
||||||
|
ok, err := s.isStateValidatorMigrationOver()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if ok {
|
||||||
|
protoState.Validators = validatorEntries
|
||||||
|
}
|
||||||
|
return statenative.InitializeFromProtoUnsafeDeneb(protoState)
|
||||||
case hasCapellaKey(enc):
|
case hasCapellaKey(enc):
|
||||||
// Marshal state bytes to capella beacon state.
|
// Marshal state bytes to capella beacon state.
|
||||||
protoState := ðpb.BeaconStateCapella{}
|
protoState := ðpb.BeaconStateCapella{}
|
||||||
@@ -580,6 +593,19 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
|
return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
|
||||||
|
case *ethpb.BeaconStateDeneb:
|
||||||
|
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("non valid inner state")
|
||||||
|
}
|
||||||
|
if rState == nil {
|
||||||
|
return nil, errors.New("nil state")
|
||||||
|
}
|
||||||
|
rawObj, err := rState.MarshalSSZ()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return snappy.Encode(nil, append(denebKey, rawObj...)), nil
|
||||||
default:
|
default:
|
||||||
return nil, errors.New("invalid inner state")
|
return nil, errors.New("invalid inner state")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -35,6 +35,7 @@ const (
|
|||||||
NewPayloadMethod = "engine_newPayloadV1"
|
NewPayloadMethod = "engine_newPayloadV1"
|
||||||
// NewPayloadMethodV2 v2 request string for JSON-RPC.
|
// NewPayloadMethodV2 v2 request string for JSON-RPC.
|
||||||
NewPayloadMethodV2 = "engine_newPayloadV2"
|
NewPayloadMethodV2 = "engine_newPayloadV2"
|
||||||
|
NewPayloadMethodV3 = "engine_newPayloadV3"
|
||||||
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
|
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
|
||||||
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
|
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
|
||||||
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
|
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
|
||||||
@@ -43,6 +44,9 @@ const (
|
|||||||
GetPayloadMethod = "engine_getPayloadV1"
|
GetPayloadMethod = "engine_getPayloadV1"
|
||||||
// GetPayloadMethodV2 v2 request string for JSON-RPC.
|
// GetPayloadMethodV2 v2 request string for JSON-RPC.
|
||||||
GetPayloadMethodV2 = "engine_getPayloadV2"
|
GetPayloadMethodV2 = "engine_getPayloadV2"
|
||||||
|
GetPayloadMethodV3 = "engine_getPayloadV3"
|
||||||
|
// GetBlobsBundleMethod v1 request string for JSON-RPC.
|
||||||
|
GetBlobsBundleMethod = "engine_getBlobsBundleV1"
|
||||||
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
|
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
|
||||||
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
|
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
|
||||||
// ExecutionBlockByHashMethod request string for JSON-RPC.
|
// ExecutionBlockByHashMethod request string for JSON-RPC.
|
||||||
@@ -89,6 +93,7 @@ type EngineCaller interface {
|
|||||||
) error
|
) error
|
||||||
ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
|
ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
|
||||||
GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
|
GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
|
||||||
|
GetBlobsBundle(ctx context.Context, payloadId [8]byte) (*pb.BlobsBundle, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
var EmptyBlockHash = errors.New("Block hash is empty 0x0000...")
|
var EmptyBlockHash = errors.New("Block hash is empty 0x0000...")
|
||||||
@@ -126,6 +131,15 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, handleRPCError(err)
|
return nil, handleRPCError(err)
|
||||||
}
|
}
|
||||||
|
case *pb.ExecutionPayloadDeneb:
|
||||||
|
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadDeneb)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("execution data must be a Deneb execution payload")
|
||||||
|
}
|
||||||
|
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb)
|
||||||
|
if err != nil {
|
||||||
|
return nil, handleRPCError(err)
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
return nil, errors.New("unknown execution data type")
|
return nil, errors.New("unknown execution data type")
|
||||||
}
|
}
|
||||||
@@ -173,7 +187,7 @@ func (s *Service) ForkchoiceUpdated(
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, handleRPCError(err)
|
return nil, nil, handleRPCError(err)
|
||||||
}
|
}
|
||||||
case version.Capella:
|
case version.Capella, version.Deneb:
|
||||||
a, err := attrs.PbV2()
|
a, err := attrs.PbV2()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
@@ -215,6 +229,15 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
|
|||||||
ctx, cancel := context.WithDeadline(ctx, d)
|
ctx, cancel := context.WithDeadline(ctx, d)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
|
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
|
||||||
|
result := &pb.ExecutionPayloadDenebWithValue{}
|
||||||
|
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV3, pb.PayloadIDBytes(payloadId))
|
||||||
|
if err != nil {
|
||||||
|
return nil, handleRPCError(err)
|
||||||
|
}
|
||||||
|
return blocks.WrappedExecutionPayloadDeneb(result.Payload, big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(result.Value)))
|
||||||
|
}
|
||||||
|
|
||||||
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
|
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
|
||||||
result := &pb.ExecutionPayloadCapellaWithValue{}
|
result := &pb.ExecutionPayloadCapellaWithValue{}
|
||||||
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV2, pb.PayloadIDBytes(payloadId))
|
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV2, pb.PayloadIDBytes(payloadId))
|
||||||
@@ -422,6 +445,19 @@ func (s *Service) ExecutionBlocksByHashes(ctx context.Context, hashes []common.H
|
|||||||
return execBlks, nil
|
return execBlks, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetBlobsBundle calls the engine_getBlobsV1 method via JSON-RPC.
|
||||||
|
func (s *Service) GetBlobsBundle(ctx context.Context, payloadId [8]byte) (*pb.BlobsBundle, error) {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsBundle")
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
d := time.Now().Add(defaultEngineTimeout)
|
||||||
|
ctx, cancel := context.WithDeadline(ctx, d)
|
||||||
|
defer cancel()
|
||||||
|
result := &pb.BlobsBundle{}
|
||||||
|
err := s.rpcClient.CallContext(ctx, result, GetBlobsBundleMethod, pb.PayloadIDBytes(payloadId))
|
||||||
|
return result, handleRPCError(err)
|
||||||
|
}
|
||||||
|
|
||||||
// HeaderByHash returns the relevant header details for the provided block hash.
|
// HeaderByHash returns the relevant header details for the provided block hash.
|
||||||
func (s *Service) HeaderByHash(ctx context.Context, hash common.Hash) (*types.HeaderInfo, error) {
|
func (s *Service) HeaderByHash(ctx context.Context, hash common.Hash) (*types.HeaderInfo, error) {
|
||||||
var hdr *types.HeaderInfo
|
var hdr *types.HeaderInfo
|
||||||
|
|||||||
@@ -405,8 +405,8 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
|
|||||||
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
|
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
|
||||||
web3Service.httpLogger = testAcc.Backend
|
web3Service.httpLogger = testAcc.Backend
|
||||||
web3Service.latestEth1Data.LastRequestedBlock = 0
|
web3Service.latestEth1Data.LastRequestedBlock = 0
|
||||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||||
bConfig := params.MinimalSpecConfig().Copy()
|
bConfig := params.MinimalSpecConfig().Copy()
|
||||||
bConfig.MinGenesisTime = 0
|
bConfig.MinGenesisTime = 0
|
||||||
bConfig.SecondsPerETH1Block = 10
|
bConfig.SecondsPerETH1Block = 10
|
||||||
@@ -444,8 +444,8 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
|
|||||||
for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ {
|
for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ {
|
||||||
testAcc.Backend.Commit()
|
testAcc.Backend.Commit()
|
||||||
}
|
}
|
||||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||||
|
|
||||||
// Set up our subscriber now to listen for the chain started event.
|
// Set up our subscriber now to listen for the chain started event.
|
||||||
stateChannel := make(chan *feed.Event, 1)
|
stateChannel := make(chan *feed.Event, 1)
|
||||||
@@ -502,8 +502,8 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
|
|||||||
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
|
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
|
||||||
web3Service.httpLogger = testAcc.Backend
|
web3Service.httpLogger = testAcc.Backend
|
||||||
web3Service.latestEth1Data.LastRequestedBlock = 0
|
web3Service.latestEth1Data.LastRequestedBlock = 0
|
||||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||||
bConfig := params.MinimalSpecConfig().Copy()
|
bConfig := params.MinimalSpecConfig().Copy()
|
||||||
bConfig.SecondsPerETH1Block = 10
|
bConfig.SecondsPerETH1Block = 10
|
||||||
params.OverrideBeaconConfig(bConfig)
|
params.OverrideBeaconConfig(bConfig)
|
||||||
@@ -540,14 +540,14 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
|
|||||||
for i := uint64(0); i < 1500; i++ {
|
for i := uint64(0); i < 1500; i++ {
|
||||||
testAcc.Backend.Commit()
|
testAcc.Backend.Commit()
|
||||||
}
|
}
|
||||||
wantedGenesisTime := testAcc.Backend.Blockchain().CurrentBlock().Time()
|
wantedGenesisTime := testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||||
|
|
||||||
// Forward the chain to account for the follow distance
|
// Forward the chain to account for the follow distance
|
||||||
for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ {
|
for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ {
|
||||||
testAcc.Backend.Commit()
|
testAcc.Backend.Commit()
|
||||||
}
|
}
|
||||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||||
|
|
||||||
// Set the genesis time 500 blocks ahead of the last
|
// Set the genesis time 500 blocks ahead of the last
|
||||||
// deposit log.
|
// deposit log.
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package execution
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
@@ -14,7 +15,6 @@ import (
|
|||||||
contracts "github.com/prysmaticlabs/prysm/v4/contracts/deposit"
|
contracts "github.com/prysmaticlabs/prysm/v4/contracts/deposit"
|
||||||
"github.com/prysmaticlabs/prysm/v4/io/logs"
|
"github.com/prysmaticlabs/prysm/v4/io/logs"
|
||||||
"github.com/prysmaticlabs/prysm/v4/network"
|
"github.com/prysmaticlabs/prysm/v4/network"
|
||||||
"github.com/prysmaticlabs/prysm/v4/network/authorization"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpoint network.Endpoint) error {
|
func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpoint network.Endpoint) error {
|
||||||
@@ -34,7 +34,7 @@ func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpo
|
|||||||
}
|
}
|
||||||
s.depositContractCaller = depositContractCaller
|
s.depositContractCaller = depositContractCaller
|
||||||
|
|
||||||
// Ensure we have the correct chain and deposit IDs.
|
//Ensure we have the correct chain and deposit IDs.
|
||||||
if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
|
if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
|
||||||
client.Close()
|
client.Close()
|
||||||
errStr := err.Error()
|
errStr := err.Error()
|
||||||
@@ -113,9 +113,21 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
headers := http.Header{}
|
||||||
|
for _, h := range s.cfg.headers {
|
||||||
|
if h == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
keyValue := strings.Split(h, "=")
|
||||||
|
if len(keyValue) < 2 {
|
||||||
|
log.Warnf("Incorrect HTTP header flag format. Skipping %v", keyValue[0])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
headers.Set(keyValue[0], strings.Join(keyValue[1:], "="))
|
||||||
|
}
|
||||||
switch u.Scheme {
|
switch u.Scheme {
|
||||||
case "http", "https":
|
case "http", "https":
|
||||||
client, err = gethRPC.DialOptions(ctx, endpoint.Url, gethRPC.WithHTTPClient(endpoint.HttpClient()))
|
client, err = gethRPC.DialOptions(ctx, endpoint.Url, gethRPC.WithHTTPClient(endpoint.HttpClient()), gethRPC.WithHeaders(headers))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -127,13 +139,6 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
|
|||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
|
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
|
||||||
}
|
}
|
||||||
if endpoint.Auth.Method != authorization.None {
|
|
||||||
header, err := endpoint.Auth.ToHeaderValue()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
client.SetHeader("Authorization", header)
|
|
||||||
}
|
|
||||||
for _, h := range s.cfg.headers {
|
for _, h := range s.cfg.headers {
|
||||||
if h != "" {
|
if h != "" {
|
||||||
keyValue := strings.Split(h, "=")
|
keyValue := strings.Split(h, "=")
|
||||||
|
|||||||
@@ -210,14 +210,14 @@ func TestFollowBlock_OK(t *testing.T) {
|
|||||||
|
|
||||||
web3Service = setDefaultMocks(web3Service)
|
web3Service = setDefaultMocks(web3Service)
|
||||||
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
|
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
|
||||||
baseHeight := testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
baseHeight := testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||||
// process follow_distance blocks
|
// process follow_distance blocks
|
||||||
for i := 0; i < int(params.BeaconConfig().Eth1FollowDistance); i++ {
|
for i := 0; i < int(params.BeaconConfig().Eth1FollowDistance); i++ {
|
||||||
testAcc.Backend.Commit()
|
testAcc.Backend.Commit()
|
||||||
}
|
}
|
||||||
// set current height
|
// set current height
|
||||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||||
|
|
||||||
h, err := web3Service.followedBlockHeight(context.Background())
|
h, err := web3Service.followedBlockHeight(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -229,8 +229,8 @@ func TestFollowBlock_OK(t *testing.T) {
|
|||||||
testAcc.Backend.Commit()
|
testAcc.Backend.Commit()
|
||||||
}
|
}
|
||||||
// set current height
|
// set current height
|
||||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||||
|
|
||||||
h, err = web3Service.followedBlockHeight(context.Background())
|
h, err = web3Service.followedBlockHeight(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|||||||
@@ -38,6 +38,7 @@ type EngineClient struct {
|
|||||||
TerminalBlockHash []byte
|
TerminalBlockHash []byte
|
||||||
TerminalBlockHashExists bool
|
TerminalBlockHashExists bool
|
||||||
OverrideValidHash [32]byte
|
OverrideValidHash [32]byte
|
||||||
|
BlobsBundle *pb.BlobsBundle
|
||||||
BlockValue *big.Int
|
BlockValue *big.Int
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -172,3 +173,8 @@ func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime
|
|||||||
blk = parentBlk
|
blk = parentBlk
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetBlobsBundle --
|
||||||
|
func (e *EngineClient) GetBlobsBundle(ctx context.Context, payloadId [8]byte) (*pb.BlobsBundle, error) {
|
||||||
|
return e.BlobsBundle, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -141,6 +141,35 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BroadcastBlob broadcasts a blob to the p2p network, the message is assumed to be
|
||||||
|
// broadcasted to the current fork and to the input subnet.
|
||||||
|
func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastBlob")
|
||||||
|
defer span.End()
|
||||||
|
forkDigest, err := s.currentForkDigest()
|
||||||
|
if err != nil {
|
||||||
|
err := errors.Wrap(err, "could not retrieve fork digest")
|
||||||
|
tracing.AnnotateError(span, err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
|
||||||
|
go s.broadcastBlob(ctx, subnet, blob, forkDigest)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar, forkDigest [4]byte) {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "p2p.broadcastBlob")
|
||||||
|
defer span.End()
|
||||||
|
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||||
|
|
||||||
|
if err := s.broadcastObject(ctx, blobSidecar, blobSubnetToTopic(subnet, forkDigest)); err != nil {
|
||||||
|
log.WithError(err).Error("Failed to broadcast blob sidecar")
|
||||||
|
tracing.AnnotateError(span, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
|
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
|
||||||
ctx, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
|
ctx, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
@@ -232,3 +261,7 @@ func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
|
|||||||
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
|
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
|
||||||
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
|
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
|
||||||
|
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
|
||||||
|
}
|
||||||
|
|||||||
@@ -17,7 +17,8 @@ func (s *Service) forkWatcher() {
|
|||||||
currEpoch := slots.ToEpoch(currSlot)
|
currEpoch := slots.ToEpoch(currSlot)
|
||||||
if currEpoch == params.BeaconConfig().AltairForkEpoch ||
|
if currEpoch == params.BeaconConfig().AltairForkEpoch ||
|
||||||
currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
|
currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
|
||||||
currEpoch == params.BeaconConfig().CapellaForkEpoch {
|
currEpoch == params.BeaconConfig().CapellaForkEpoch ||
|
||||||
|
currEpoch == params.BeaconConfig().DenebForkEpoch {
|
||||||
// If we are in the fork epoch, we update our enr with
|
// If we are in the fork epoch, we update our enr with
|
||||||
// the updated fork digest. These repeatedly does
|
// the updated fork digest. These repeatedly does
|
||||||
// this over the epoch, which might be slightly wasteful
|
// this over the epoch, which might be slightly wasteful
|
||||||
|
|||||||
@@ -119,6 +119,9 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
|
|||||||
return defaultProposerSlashingTopicParams(), nil
|
return defaultProposerSlashingTopicParams(), nil
|
||||||
case strings.Contains(topic, GossipAttesterSlashingMessage):
|
case strings.Contains(topic, GossipAttesterSlashingMessage):
|
||||||
return defaultAttesterSlashingTopicParams(), nil
|
return defaultAttesterSlashingTopicParams(), nil
|
||||||
|
case strings.Contains(topic, GossipBlobSidecarMessage):
|
||||||
|
// TODO(Deneb): Using the default block scoring. But this should be updated.
|
||||||
|
return defaultBlockTopicParams(), nil
|
||||||
case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
|
case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
|
||||||
return defaultBlsToExecutionChangeTopicParams(), nil
|
return defaultBlsToExecutionChangeTopicParams(), nil
|
||||||
default:
|
default:
|
||||||
|
|||||||
@@ -21,12 +21,16 @@ var gossipTopicMappings = map[string]proto.Message{
|
|||||||
SyncContributionAndProofSubnetTopicFormat: ðpb.SignedContributionAndProof{},
|
SyncContributionAndProofSubnetTopicFormat: ðpb.SignedContributionAndProof{},
|
||||||
SyncCommitteeSubnetTopicFormat: ðpb.SyncCommitteeMessage{},
|
SyncCommitteeSubnetTopicFormat: ðpb.SyncCommitteeMessage{},
|
||||||
BlsToExecutionChangeSubnetTopicFormat: ðpb.SignedBLSToExecutionChange{},
|
BlsToExecutionChangeSubnetTopicFormat: ðpb.SignedBLSToExecutionChange{},
|
||||||
|
BlobSubnetTopicFormat: ðpb.SignedBlobSidecar{},
|
||||||
}
|
}
|
||||||
|
|
||||||
// GossipTopicMappings is a function to return the assigned data type
|
// GossipTopicMappings is a function to return the assigned data type
|
||||||
// versioned by epoch.
|
// versioned by epoch.
|
||||||
func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
|
func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
|
||||||
if topic == BlockSubnetTopicFormat {
|
if topic == BlockSubnetTopicFormat {
|
||||||
|
if epoch >= params.BeaconConfig().DenebForkEpoch {
|
||||||
|
return ðpb.SignedBeaconBlockDeneb{}
|
||||||
|
}
|
||||||
if epoch >= params.BeaconConfig().CapellaForkEpoch {
|
if epoch >= params.BeaconConfig().CapellaForkEpoch {
|
||||||
return ðpb.SignedBeaconBlockCapella{}
|
return ðpb.SignedBeaconBlockCapella{}
|
||||||
}
|
}
|
||||||
@@ -64,4 +68,6 @@ func init() {
|
|||||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
|
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
|
||||||
// Specially handle Capella objects
|
// Specially handle Capella objects
|
||||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlockCapella{})] = BlockSubnetTopicFormat
|
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlockCapella{})] = BlockSubnetTopicFormat
|
||||||
|
// Specially handle Deneb objects
|
||||||
|
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlockDeneb{})] = BlockSubnetTopicFormat
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -35,6 +35,7 @@ type Broadcaster interface {
|
|||||||
Broadcast(context.Context, proto.Message) error
|
Broadcast(context.Context, proto.Message) error
|
||||||
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
|
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
|
||||||
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
|
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
|
||||||
|
BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||||
|
|||||||
@@ -57,12 +57,17 @@ func (s *Service) CanSubscribe(topic string) bool {
|
|||||||
log.WithError(err).Error("Could not determine Capella fork digest")
|
log.WithError(err).Error("Could not determine Capella fork digest")
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).Error("Could not determine Capella fork digest")
|
||||||
|
return false
|
||||||
|
}
|
||||||
switch parts[2] {
|
switch parts[2] {
|
||||||
case fmt.Sprintf("%x", phase0ForkDigest):
|
case fmt.Sprintf("%x", phase0ForkDigest):
|
||||||
case fmt.Sprintf("%x", altairForkDigest):
|
case fmt.Sprintf("%x", altairForkDigest):
|
||||||
case fmt.Sprintf("%x", bellatrixForkDigest):
|
case fmt.Sprintf("%x", bellatrixForkDigest):
|
||||||
case fmt.Sprintf("%x", capellaForkDigest):
|
case fmt.Sprintf("%x", capellaForkDigest):
|
||||||
|
case fmt.Sprintf("%x", denebForkDigest):
|
||||||
default:
|
default:
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -92,7 +92,7 @@ func TestService_CanSubscribe(t *testing.T) {
|
|||||||
formatting := []interface{}{digest}
|
formatting := []interface{}{digest}
|
||||||
|
|
||||||
// Special case for attestation subnets which have a second formatting placeholder.
|
// Special case for attestation subnets which have a second formatting placeholder.
|
||||||
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat {
|
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat {
|
||||||
formatting = append(formatting, 0 /* some subnet ID */)
|
formatting = append(formatting, 0 /* some subnet ID */)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -37,6 +37,9 @@ const PingMessageName = "/ping"
|
|||||||
// MetadataMessageName specifies the name for the metadata message topic.
|
// MetadataMessageName specifies the name for the metadata message topic.
|
||||||
const MetadataMessageName = "/metadata"
|
const MetadataMessageName = "/metadata"
|
||||||
|
|
||||||
|
const BlobSidecarsByRootName = "/blob_sidecars_by_root"
|
||||||
|
const BlobSidecarsByRangeName = "/blob_sidecars_by_range"
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// V1 RPC Topics
|
// V1 RPC Topics
|
||||||
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
|
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
|
||||||
@@ -52,6 +55,15 @@ const (
|
|||||||
// RPCMetaDataTopicV1 defines the v1 topic for the metadata rpc method.
|
// RPCMetaDataTopicV1 defines the v1 topic for the metadata rpc method.
|
||||||
RPCMetaDataTopicV1 = protocolPrefix + MetadataMessageName + SchemaVersionV1
|
RPCMetaDataTopicV1 = protocolPrefix + MetadataMessageName + SchemaVersionV1
|
||||||
|
|
||||||
|
// RPCBlobSidecarsByRootTopicV1 is a topic for requesting blob sidecars by their block root. New in deneb.
|
||||||
|
// /eth2/beacon_chain/req/blob_sidecars_by_root/1/
|
||||||
|
RPCBlobSidecarsByRootTopicV1 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV1
|
||||||
|
|
||||||
|
// RPCBlobSidecarsByRangeTopicV1 is a topic for requesting blob sidecars
|
||||||
|
// in the slot range [start_slot, start_slot + count), leading up to the current head block as selected by fork choice.
|
||||||
|
// Protocol ID: /eth2/beacon_chain/req/blob_sidecars_by_range/1/ - New in deneb.
|
||||||
|
RPCBlobSidecarsByRangeTopicV1 = protocolPrefix + BlobSidecarsByRangeName + SchemaVersionV1
|
||||||
|
|
||||||
// V2 RPC Topics
|
// V2 RPC Topics
|
||||||
// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.
|
// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.
|
||||||
RPCBlocksByRangeTopicV2 = protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV2
|
RPCBlocksByRangeTopicV2 = protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV2
|
||||||
@@ -83,6 +95,10 @@ var RPCTopicMappings = map[string]interface{}{
|
|||||||
// RPC Metadata Message
|
// RPC Metadata Message
|
||||||
RPCMetaDataTopicV1: new(interface{}),
|
RPCMetaDataTopicV1: new(interface{}),
|
||||||
RPCMetaDataTopicV2: new(interface{}),
|
RPCMetaDataTopicV2: new(interface{}),
|
||||||
|
// BlobSidecarsByRange v1 Message
|
||||||
|
RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),
|
||||||
|
// BlobSidecarsByRoot v1 Message
|
||||||
|
RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
|
||||||
}
|
}
|
||||||
|
|
||||||
// Maps all registered protocol prefixes.
|
// Maps all registered protocol prefixes.
|
||||||
@@ -99,6 +115,8 @@ var messageMapping = map[string]bool{
|
|||||||
BeaconBlocksByRootsMessageName: true,
|
BeaconBlocksByRootsMessageName: true,
|
||||||
PingMessageName: true,
|
PingMessageName: true,
|
||||||
MetadataMessageName: true,
|
MetadataMessageName: true,
|
||||||
|
BlobSidecarsByRangeName: true,
|
||||||
|
BlobSidecarsByRootName: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Maps all the RPC messages which are to updated in altair.
|
// Maps all the RPC messages which are to updated in altair.
|
||||||
@@ -113,6 +131,15 @@ var versionMapping = map[string]bool{
|
|||||||
SchemaVersionV2: true,
|
SchemaVersionV2: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var PreAltairV1SchemaMapping = map[string]bool{
|
||||||
|
StatusMessageName: true,
|
||||||
|
GoodbyeMessageName: true,
|
||||||
|
BeaconBlocksByRangeMessageName: true,
|
||||||
|
BeaconBlocksByRootsMessageName: true,
|
||||||
|
PingMessageName: true,
|
||||||
|
MetadataMessageName: true,
|
||||||
|
}
|
||||||
|
|
||||||
// VerifyTopicMapping verifies that the topic and its accompanying
|
// VerifyTopicMapping verifies that the topic and its accompanying
|
||||||
// message type is correct.
|
// message type is correct.
|
||||||
func VerifyTopicMapping(topic string, msg interface{}) error {
|
func VerifyTopicMapping(topic string, msg interface{}) error {
|
||||||
|
|||||||
@@ -138,6 +138,11 @@ func (_ *FakeP2P) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Att
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BroadcastBlob -- fake.
|
||||||
|
func (p *FakeP2P) BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// BroadcastSyncCommitteeMessage -- fake.
|
// BroadcastSyncCommitteeMessage -- fake.
|
||||||
func (_ *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
func (_ *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -33,3 +33,9 @@ func (m *MockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uin
|
|||||||
m.BroadcastCalled = true
|
m.BroadcastCalled = true
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BroadcastSyncCommitteeMessage records a broadcast occurred.
|
||||||
|
func (m *MockBroadcaster) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error {
|
||||||
|
m.BroadcastCalled = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -170,6 +170,11 @@ func (p *TestP2P) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Att
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *TestP2P) BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error {
|
||||||
|
p.BroadcastCalled = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// BroadcastSyncCommitteeMessage broadcasts a sync committee message.
|
// BroadcastSyncCommitteeMessage broadcasts a sync committee message.
|
||||||
func (p *TestP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
func (p *TestP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
||||||
p.BroadcastCalled = true
|
p.BroadcastCalled = true
|
||||||
|
|||||||
@@ -28,6 +28,8 @@ const (
|
|||||||
GossipContributionAndProofMessage = "sync_committee_contribution_and_proof"
|
GossipContributionAndProofMessage = "sync_committee_contribution_and_proof"
|
||||||
// GossipBlsToExecutionChangeMessage is the name for the bls to execution change message type.
|
// GossipBlsToExecutionChangeMessage is the name for the bls to execution change message type.
|
||||||
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
|
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
|
||||||
|
// GossipBlobSidecarMessage is the name for the blob sidecar message type.
|
||||||
|
GossipBlobSidecarMessage = "blob_sidecar"
|
||||||
|
|
||||||
// Topic Formats
|
// Topic Formats
|
||||||
//
|
//
|
||||||
@@ -49,4 +51,6 @@ const (
|
|||||||
SyncContributionAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipContributionAndProofMessage
|
SyncContributionAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipContributionAndProofMessage
|
||||||
// BlsToExecutionChangeSubnetTopicFormat is the topic format for the bls to execution change subnet.
|
// BlsToExecutionChangeSubnetTopicFormat is the topic format for the bls to execution change subnet.
|
||||||
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
|
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
|
||||||
|
// BlobSubnetTopicFormat is the topic format for the blob subnet.
|
||||||
|
BlobSubnetTopicFormat = GossipProtocolAndDigest + GossipBlobSidecarMessage + "_%d"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -53,6 +53,11 @@ func InitializeDataMaps() {
|
|||||||
ðpb.SignedBeaconBlockCapella{Block: ðpb.BeaconBlockCapella{Body: ðpb.BeaconBlockBodyCapella{}}},
|
ðpb.SignedBeaconBlockCapella{Block: ðpb.BeaconBlockCapella{Body: ðpb.BeaconBlockBodyCapella{}}},
|
||||||
)
|
)
|
||||||
},
|
},
|
||||||
|
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||||
|
return blocks.NewSignedBeaconBlock(
|
||||||
|
ðpb.SignedBeaconBlockDeneb{Block: ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{}}},
|
||||||
|
)
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reset our metadata map.
|
// Reset our metadata map.
|
||||||
@@ -69,5 +74,8 @@ func InitializeDataMaps() {
|
|||||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() metadata.Metadata {
|
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() metadata.Metadata {
|
||||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||||
},
|
},
|
||||||
|
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() metadata.Metadata {
|
||||||
|
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,4 +12,5 @@ var (
|
|||||||
ErrRateLimited = errors.New("rate limited")
|
ErrRateLimited = errors.New("rate limited")
|
||||||
ErrIODeadline = errors.New("i/o deadline exceeded")
|
ErrIODeadline = errors.New("i/o deadline exceeded")
|
||||||
ErrInvalidRequest = errors.New("invalid range, step or count")
|
ErrInvalidRequest = errors.New("invalid range, step or count")
|
||||||
|
ErrBlobLTMinRequest = errors.New("blob slot < minimum_request_epoch")
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
ssz "github.com/prysmaticlabs/fastssz"
|
ssz "github.com/prysmaticlabs/fastssz"
|
||||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
|
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
)
|
)
|
||||||
|
|
||||||
const rootLength = 32
|
const rootLength = 32
|
||||||
@@ -31,6 +32,7 @@ func (b *SSZBytes) HashTreeRootWith(hh *ssz.Hasher) error {
|
|||||||
|
|
||||||
// BeaconBlockByRootsReq specifies the block by roots request type.
|
// BeaconBlockByRootsReq specifies the block by roots request type.
|
||||||
type BeaconBlockByRootsReq [][rootLength]byte
|
type BeaconBlockByRootsReq [][rootLength]byte
|
||||||
|
type BlobSidecarsByRootReq []*eth.BlobIdentifier
|
||||||
|
|
||||||
// MarshalSSZTo marshals the block by roots request with the provided byte slice.
|
// MarshalSSZTo marshals the block by roots request with the provided byte slice.
|
||||||
func (r *BeaconBlockByRootsReq) MarshalSSZTo(dst []byte) ([]byte, error) {
|
func (r *BeaconBlockByRootsReq) MarshalSSZTo(dst []byte) ([]byte, error) {
|
||||||
|
|||||||
@@ -503,6 +503,13 @@ type capellaBlockResponseJson struct {
|
|||||||
Finalized bool `json:"finalized"`
|
Finalized bool `json:"finalized"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type denebBlockResponseJson struct {
|
||||||
|
Version string `json:"version"`
|
||||||
|
Data *SignedBeaconBlockDenebContainerJson `json:"data"`
|
||||||
|
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||||
|
Finalized bool `json:"finalized"`
|
||||||
|
}
|
||||||
|
|
||||||
type bellatrixBlindedBlockResponseJson struct {
|
type bellatrixBlindedBlockResponseJson struct {
|
||||||
Version string `json:"version" enum:"true"`
|
Version string `json:"version" enum:"true"`
|
||||||
Data *SignedBlindedBeaconBlockBellatrixContainerJson `json:"data"`
|
Data *SignedBlindedBeaconBlockBellatrixContainerJson `json:"data"`
|
||||||
@@ -517,6 +524,12 @@ type capellaBlindedBlockResponseJson struct {
|
|||||||
Finalized bool `json:"finalized"`
|
Finalized bool `json:"finalized"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type denebBlindedBlockResponseJson struct {
|
||||||
|
Version string `json:"version"`
|
||||||
|
Data *SignedBlindedBeaconBlockDenebContainerJson `json:"data"`
|
||||||
|
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||||
|
}
|
||||||
|
|
||||||
func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
||||||
respContainer, ok := response.(*BlockV2ResponseJson)
|
respContainer, ok := response.(*BlockV2ResponseJson)
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -565,6 +578,16 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
|
|||||||
ExecutionOptimistic: respContainer.ExecutionOptimistic,
|
ExecutionOptimistic: respContainer.ExecutionOptimistic,
|
||||||
Finalized: respContainer.Finalized,
|
Finalized: respContainer.Finalized,
|
||||||
}
|
}
|
||||||
|
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_Deneb.String())):
|
||||||
|
actualRespContainer = &denebBlockResponseJson{
|
||||||
|
Version: respContainer.Version,
|
||||||
|
Data: &SignedBeaconBlockDenebContainerJson{
|
||||||
|
Message: respContainer.Data.DenebBlock,
|
||||||
|
Signature: respContainer.Data.Signature,
|
||||||
|
},
|
||||||
|
ExecutionOptimistic: respContainer.ExecutionOptimistic,
|
||||||
|
Finalized: respContainer.Finalized,
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
|
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
|
||||||
}
|
}
|
||||||
@@ -624,6 +647,15 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
|
|||||||
ExecutionOptimistic: respContainer.ExecutionOptimistic,
|
ExecutionOptimistic: respContainer.ExecutionOptimistic,
|
||||||
Finalized: respContainer.Finalized,
|
Finalized: respContainer.Finalized,
|
||||||
}
|
}
|
||||||
|
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_Deneb.String())):
|
||||||
|
actualRespContainer = &denebBlindedBlockResponseJson{
|
||||||
|
Version: respContainer.Version,
|
||||||
|
Data: &SignedBlindedBeaconBlockDenebContainerJson{
|
||||||
|
Message: respContainer.Data.DenebBlock,
|
||||||
|
Signature: respContainer.Data.Signature,
|
||||||
|
},
|
||||||
|
ExecutionOptimistic: respContainer.ExecutionOptimistic,
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
|
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
|
||||||
}
|
}
|
||||||
@@ -655,6 +687,11 @@ type capellaStateResponseJson struct {
|
|||||||
Data *BeaconStateCapellaJson `json:"data"`
|
Data *BeaconStateCapellaJson `json:"data"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type denebStateResponseJson struct {
|
||||||
|
Version string `json:"version"`
|
||||||
|
Data *BeaconStateDenebJson `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
||||||
respContainer, ok := response.(*BeaconStateV2ResponseJson)
|
respContainer, ok := response.(*BeaconStateV2ResponseJson)
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -683,6 +720,11 @@ func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, a
|
|||||||
Version: respContainer.Version,
|
Version: respContainer.Version,
|
||||||
Data: respContainer.Data.CapellaState,
|
Data: respContainer.Data.CapellaState,
|
||||||
}
|
}
|
||||||
|
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_Deneb.String())):
|
||||||
|
actualRespContainer = &denebStateResponseJson{
|
||||||
|
Version: respContainer.Version,
|
||||||
|
Data: respContainer.Data.DenebState,
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported state version '%s'", respContainer.Version))
|
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported state version '%s'", respContainer.Version))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -395,6 +395,7 @@ type SignedBeaconBlockContainerV2Json struct {
|
|||||||
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
||||||
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
|
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||||
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
|
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
|
||||||
|
DenebBlock *BeaconBlockDenebJson `json:"deneb_block"`
|
||||||
Signature string `json:"signature" hex:"true"`
|
Signature string `json:"signature" hex:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -403,6 +404,7 @@ type SignedBlindedBeaconBlockContainerJson struct {
|
|||||||
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
||||||
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
|
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||||
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
|
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
|
||||||
|
DenebBlock *BlindedBeaconBlockDenebJson `json:"deneb_block"`
|
||||||
Signature string `json:"signature" hex:"true"`
|
Signature string `json:"signature" hex:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -411,6 +413,7 @@ type BeaconBlockContainerV2Json struct {
|
|||||||
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
||||||
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
|
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||||
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
|
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
|
||||||
|
DenebBlock *BeaconBlockDenebJson `json:"deneb_block"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type BlindedBeaconBlockContainerJson struct {
|
type BlindedBeaconBlockContainerJson struct {
|
||||||
@@ -418,6 +421,7 @@ type BlindedBeaconBlockContainerJson struct {
|
|||||||
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
||||||
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
|
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||||
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
|
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
|
||||||
|
DenebBlock *BlindedBeaconBlockDenebJson `json:"deneb_block"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type SignedBeaconBlockAltairContainerJson struct {
|
type SignedBeaconBlockAltairContainerJson struct {
|
||||||
@@ -435,6 +439,11 @@ type SignedBeaconBlockCapellaContainerJson struct {
|
|||||||
Signature string `json:"signature" hex:"true"`
|
Signature string `json:"signature" hex:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type SignedBeaconBlockDenebContainerJson struct {
|
||||||
|
Message *BeaconBlockDenebJson `json:"message"`
|
||||||
|
Signature string `json:"signature" hex:"true"`
|
||||||
|
}
|
||||||
|
|
||||||
type SignedBlindedBeaconBlockBellatrixContainerJson struct {
|
type SignedBlindedBeaconBlockBellatrixContainerJson struct {
|
||||||
Message *BlindedBeaconBlockBellatrixJson `json:"message"`
|
Message *BlindedBeaconBlockBellatrixJson `json:"message"`
|
||||||
Signature string `json:"signature" hex:"true"`
|
Signature string `json:"signature" hex:"true"`
|
||||||
@@ -445,6 +454,11 @@ type SignedBlindedBeaconBlockCapellaContainerJson struct {
|
|||||||
Signature string `json:"signature" hex:"true"`
|
Signature string `json:"signature" hex:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type SignedBlindedBeaconBlockDenebContainerJson struct {
|
||||||
|
Message *BlindedBeaconBlockDenebJson `json:"message"`
|
||||||
|
Signature string `json:"signature" hex:"true"`
|
||||||
|
}
|
||||||
|
|
||||||
type BeaconBlockAltairJson struct {
|
type BeaconBlockAltairJson struct {
|
||||||
Slot string `json:"slot"`
|
Slot string `json:"slot"`
|
||||||
ProposerIndex string `json:"proposer_index"`
|
ProposerIndex string `json:"proposer_index"`
|
||||||
@@ -469,6 +483,14 @@ type BeaconBlockCapellaJson struct {
|
|||||||
Body *BeaconBlockBodyCapellaJson `json:"body"`
|
Body *BeaconBlockBodyCapellaJson `json:"body"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type BeaconBlockDenebJson struct {
|
||||||
|
Slot string `json:"slot"`
|
||||||
|
ProposerIndex string `json:"proposer_index"`
|
||||||
|
ParentRoot string `json:"parent_root" hex:"true"`
|
||||||
|
StateRoot string `json:"state_root" hex:"true"`
|
||||||
|
Body *BeaconBlockBodyDenebJson `json:"body"`
|
||||||
|
}
|
||||||
|
|
||||||
type BlindedBeaconBlockBellatrixJson struct {
|
type BlindedBeaconBlockBellatrixJson struct {
|
||||||
Slot string `json:"slot"`
|
Slot string `json:"slot"`
|
||||||
ProposerIndex string `json:"proposer_index"`
|
ProposerIndex string `json:"proposer_index"`
|
||||||
@@ -485,6 +507,14 @@ type BlindedBeaconBlockCapellaJson struct {
|
|||||||
Body *BlindedBeaconBlockBodyCapellaJson `json:"body"`
|
Body *BlindedBeaconBlockBodyCapellaJson `json:"body"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type BlindedBeaconBlockDenebJson struct {
|
||||||
|
Slot string `json:"slot"`
|
||||||
|
ProposerIndex string `json:"proposer_index"`
|
||||||
|
ParentRoot string `json:"parent_root" hex:"true"`
|
||||||
|
StateRoot string `json:"state_root" hex:"true"`
|
||||||
|
Body *BlindedBeaconBlockBodyDenebJson `json:"body"`
|
||||||
|
}
|
||||||
|
|
||||||
type BeaconBlockBodyAltairJson struct {
|
type BeaconBlockBodyAltairJson struct {
|
||||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||||
@@ -524,6 +554,21 @@ type BeaconBlockBodyCapellaJson struct {
|
|||||||
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type BeaconBlockBodyDenebJson struct {
|
||||||
|
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||||
|
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||||
|
Graffiti string `json:"graffiti" hex:"true"`
|
||||||
|
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
|
||||||
|
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
|
||||||
|
Attestations []*AttestationJson `json:"attestations"`
|
||||||
|
Deposits []*DepositJson `json:"deposits"`
|
||||||
|
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
|
||||||
|
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
|
||||||
|
ExecutionPayload *ExecutionPayloadDenebJson `json:"execution_payload"`
|
||||||
|
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
||||||
|
BlobKzgCommitments []string `json:"blob_kzg_commitments" hex:"true"`
|
||||||
|
}
|
||||||
|
|
||||||
type BlindedBeaconBlockBodyBellatrixJson struct {
|
type BlindedBeaconBlockBodyBellatrixJson struct {
|
||||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||||
@@ -551,6 +596,21 @@ type BlindedBeaconBlockBodyCapellaJson struct {
|
|||||||
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type BlindedBeaconBlockBodyDenebJson struct {
|
||||||
|
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||||
|
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||||
|
Graffiti string `json:"graffiti" hex:"true"`
|
||||||
|
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
|
||||||
|
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
|
||||||
|
Attestations []*AttestationJson `json:"attestations"`
|
||||||
|
Deposits []*DepositJson `json:"deposits"`
|
||||||
|
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
|
||||||
|
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
|
||||||
|
ExecutionPayloadHeader *ExecutionPayloadHeaderDenebJson `json:"execution_payload_header"`
|
||||||
|
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
||||||
|
BlobKzgCommitments []string `json:"blob_kzg_commitments" hex:"true"`
|
||||||
|
}
|
||||||
|
|
||||||
type ExecutionPayloadJson struct {
|
type ExecutionPayloadJson struct {
|
||||||
ParentHash string `json:"parent_hash" hex:"true"`
|
ParentHash string `json:"parent_hash" hex:"true"`
|
||||||
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||||
@@ -586,6 +646,25 @@ type ExecutionPayloadCapellaJson struct {
|
|||||||
Withdrawals []*WithdrawalJson `json:"withdrawals"`
|
Withdrawals []*WithdrawalJson `json:"withdrawals"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type ExecutionPayloadDenebJson struct {
|
||||||
|
ParentHash string `json:"parent_hash" hex:"true"`
|
||||||
|
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||||
|
StateRoot string `json:"state_root" hex:"true"`
|
||||||
|
ReceiptsRoot string `json:"receipts_root" hex:"true"`
|
||||||
|
LogsBloom string `json:"logs_bloom" hex:"true"`
|
||||||
|
PrevRandao string `json:"prev_randao" hex:"true"`
|
||||||
|
BlockNumber string `json:"block_number"`
|
||||||
|
GasLimit string `json:"gas_limit"`
|
||||||
|
GasUsed string `json:"gas_used"`
|
||||||
|
TimeStamp string `json:"timestamp"`
|
||||||
|
ExtraData string `json:"extra_data" hex:"true"`
|
||||||
|
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
|
||||||
|
ExcessDataGas string `json:"excess_data_gas" uint256:"true"`
|
||||||
|
BlockHash string `json:"block_hash" hex:"true"`
|
||||||
|
Transactions []string `json:"transactions" hex:"true"`
|
||||||
|
Withdrawals []*WithdrawalJson `json:"withdrawals"`
|
||||||
|
}
|
||||||
|
|
||||||
type ExecutionPayloadHeaderJson struct {
|
type ExecutionPayloadHeaderJson struct {
|
||||||
ParentHash string `json:"parent_hash" hex:"true"`
|
ParentHash string `json:"parent_hash" hex:"true"`
|
||||||
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||||
@@ -621,6 +700,25 @@ type ExecutionPayloadHeaderCapellaJson struct {
|
|||||||
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
|
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type ExecutionPayloadHeaderDenebJson struct {
|
||||||
|
ParentHash string `json:"parent_hash" hex:"true"`
|
||||||
|
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||||
|
StateRoot string `json:"state_root" hex:"true"`
|
||||||
|
ReceiptsRoot string `json:"receipts_root" hex:"true"`
|
||||||
|
LogsBloom string `json:"logs_bloom" hex:"true"`
|
||||||
|
PrevRandao string `json:"prev_randao" hex:"true"`
|
||||||
|
BlockNumber string `json:"block_number"`
|
||||||
|
GasLimit string `json:"gas_limit"`
|
||||||
|
GasUsed string `json:"gas_used"`
|
||||||
|
TimeStamp string `json:"timestamp"`
|
||||||
|
ExtraData string `json:"extra_data" hex:"true"`
|
||||||
|
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
|
||||||
|
ExcessDataGas string `json:"excess_data_gas" uint256:"true"`
|
||||||
|
BlockHash string `json:"block_hash" hex:"true"`
|
||||||
|
TransactionsRoot string `json:"transactions_root" hex:"true"`
|
||||||
|
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
|
||||||
|
}
|
||||||
|
|
||||||
type SyncAggregateJson struct {
|
type SyncAggregateJson struct {
|
||||||
SyncCommitteeBits string `json:"sync_committee_bits" hex:"true"`
|
SyncCommitteeBits string `json:"sync_committee_bits" hex:"true"`
|
||||||
SyncCommitteeSignature string `json:"sync_committee_signature" hex:"true"`
|
SyncCommitteeSignature string `json:"sync_committee_signature" hex:"true"`
|
||||||
@@ -877,11 +975,42 @@ type BeaconStateCapellaJson struct {
|
|||||||
HistoricalSummaries []*HistoricalSummaryJson `json:"historical_summaries"`
|
HistoricalSummaries []*HistoricalSummaryJson `json:"historical_summaries"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type BeaconStateDenebJson struct {
|
||||||
|
GenesisTime string `json:"genesis_time"`
|
||||||
|
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
|
||||||
|
Slot string `json:"slot"`
|
||||||
|
Fork *ForkJson `json:"fork"`
|
||||||
|
LatestBlockHeader *BeaconBlockHeaderJson `json:"latest_block_header"`
|
||||||
|
BlockRoots []string `json:"block_roots" hex:"true"`
|
||||||
|
StateRoots []string `json:"state_roots" hex:"true"`
|
||||||
|
HistoricalRoots []string `json:"historical_roots" hex:"true"`
|
||||||
|
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||||
|
Eth1DataVotes []*Eth1DataJson `json:"eth1_data_votes"`
|
||||||
|
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||||
|
Validators []*ValidatorJson `json:"validators"`
|
||||||
|
Balances []string `json:"balances"`
|
||||||
|
RandaoMixes []string `json:"randao_mixes" hex:"true"`
|
||||||
|
Slashings []string `json:"slashings"`
|
||||||
|
PreviousEpochParticipation EpochParticipation `json:"previous_epoch_participation"`
|
||||||
|
CurrentEpochParticipation EpochParticipation `json:"current_epoch_participation"`
|
||||||
|
JustificationBits string `json:"justification_bits" hex:"true"`
|
||||||
|
PreviousJustifiedCheckpoint *CheckpointJson `json:"previous_justified_checkpoint"`
|
||||||
|
CurrentJustifiedCheckpoint *CheckpointJson `json:"current_justified_checkpoint"`
|
||||||
|
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
|
||||||
|
InactivityScores []string `json:"inactivity_scores"`
|
||||||
|
CurrentSyncCommittee *SyncCommitteeJson `json:"current_sync_committee"`
|
||||||
|
NextSyncCommittee *SyncCommitteeJson `json:"next_sync_committee"`
|
||||||
|
LatestExecutionPayloadHeader *ExecutionPayloadHeaderDenebJson `json:"latest_execution_payload_header"`
|
||||||
|
NextWithdrawalIndex string `json:"next_withdrawal_index"`
|
||||||
|
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||||
|
}
|
||||||
|
|
||||||
type BeaconStateContainerV2Json struct {
|
type BeaconStateContainerV2Json struct {
|
||||||
Phase0State *BeaconStateJson `json:"phase0_state"`
|
Phase0State *BeaconStateJson `json:"phase0_state"`
|
||||||
AltairState *BeaconStateAltairJson `json:"altair_state"`
|
AltairState *BeaconStateAltairJson `json:"altair_state"`
|
||||||
BellatrixState *BeaconStateBellatrixJson `json:"bellatrix_state"`
|
BellatrixState *BeaconStateBellatrixJson `json:"bellatrix_state"`
|
||||||
CapellaState *BeaconStateCapellaJson `json:"capella_state"`
|
CapellaState *BeaconStateCapellaJson `json:"capella_state"`
|
||||||
|
DenebState *BeaconStateDenebJson `json:"deneb_state"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type ForkJson struct {
|
type ForkJson struct {
|
||||||
|
|||||||
@@ -382,6 +382,14 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
|
|||||||
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
|
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
|
||||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||||
}
|
}
|
||||||
|
result, err = bs.getBlockDeneb(ctx, blk)
|
||||||
|
if result != nil {
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
// ErrUnsupportedGetter means that we have another block type
|
||||||
|
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
|
||||||
|
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||||
|
}
|
||||||
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
|
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -436,6 +444,14 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
|
|||||||
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
|
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
|
||||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||||
}
|
}
|
||||||
|
result, err = bs.getSSZBlockDeneb(ctx, blk)
|
||||||
|
if result != nil {
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
// ErrUnsupportedGetter means that we have another block type
|
||||||
|
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
|
||||||
|
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
|
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
|
||||||
}
|
}
|
||||||
@@ -817,6 +833,76 @@ func (bs *Server) getBlockCapella(ctx context.Context, blk interfaces.ReadOnlySi
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (bs *Server) getBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
|
||||||
|
denebBlk, err := blk.PbDenebBlock()
|
||||||
|
if err != nil {
|
||||||
|
// ErrUnsupportedGetter means that we have another block type
|
||||||
|
if errors.Is(err, blocks.ErrUnsupportedGetter) {
|
||||||
|
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
|
||||||
|
if blindedDenebBlk == nil {
|
||||||
|
return nil, errNilBlock
|
||||||
|
}
|
||||||
|
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
|
||||||
|
}
|
||||||
|
denebBlk, err = signedFullBlock.PbDenebBlock()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||||
|
}
|
||||||
|
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||||
|
}
|
||||||
|
root, err := blk.Block().HashTreeRoot()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not get block root")
|
||||||
|
}
|
||||||
|
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||||
|
}
|
||||||
|
sig := blk.Signature()
|
||||||
|
return ðpbv2.BlockResponseV2{
|
||||||
|
Version: ethpbv2.Version_Deneb,
|
||||||
|
Data: ðpbv2.SignedBeaconBlockContainer{
|
||||||
|
Message: ðpbv2.SignedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
|
||||||
|
Signature: sig[:],
|
||||||
|
},
|
||||||
|
ExecutionOptimistic: isOptimistic,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if denebBlk == nil {
|
||||||
|
return nil, errNilBlock
|
||||||
|
}
|
||||||
|
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||||
|
}
|
||||||
|
root, err := blk.Block().HashTreeRoot()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not get block root")
|
||||||
|
}
|
||||||
|
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||||
|
}
|
||||||
|
sig := blk.Signature()
|
||||||
|
return ðpbv2.BlockResponseV2{
|
||||||
|
Version: ethpbv2.Version_Deneb,
|
||||||
|
Data: ðpbv2.SignedBeaconBlockContainer{
|
||||||
|
Message: ðpbv2.SignedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
|
||||||
|
Signature: sig[:],
|
||||||
|
},
|
||||||
|
ExecutionOptimistic: isOptimistic,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
func getSSZBlockPhase0(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
func getSSZBlockPhase0(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||||
phase0Blk, err := blk.PbPhase0Block()
|
phase0Blk, err := blk.PbPhase0Block()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1012,6 +1098,82 @@ func (bs *Server) getSSZBlockCapella(ctx context.Context, blk interfaces.ReadOnl
|
|||||||
return ðpbv2.SSZContainer{Version: ethpbv2.Version_CAPELLA, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
|
return ðpbv2.SSZContainer{Version: ethpbv2.Version_CAPELLA, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (bs *Server) getSSZBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||||
|
denebBlk, err := blk.PbDenebBlock()
|
||||||
|
if err != nil {
|
||||||
|
// ErrUnsupportedGetter means that we have another block type
|
||||||
|
if errors.Is(err, blocks.ErrUnsupportedGetter) {
|
||||||
|
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
|
||||||
|
if blindedDenebBlk == nil {
|
||||||
|
return nil, errNilBlock
|
||||||
|
}
|
||||||
|
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
|
||||||
|
}
|
||||||
|
denebBlk, err = signedFullBlock.PbDenebBlock()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||||
|
}
|
||||||
|
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not convert signed beacon block")
|
||||||
|
}
|
||||||
|
root, err := blk.Block().HashTreeRoot()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not get block root")
|
||||||
|
}
|
||||||
|
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||||
|
}
|
||||||
|
sig := blk.Signature()
|
||||||
|
data := ðpbv2.SignedBeaconBlockDeneb{
|
||||||
|
Message: v2Blk,
|
||||||
|
Signature: sig[:],
|
||||||
|
}
|
||||||
|
sszData, err := data.MarshalSSZ()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||||
|
}
|
||||||
|
return ðpbv2.SSZContainer{
|
||||||
|
Version: ethpbv2.Version_Deneb,
|
||||||
|
ExecutionOptimistic: isOptimistic,
|
||||||
|
Data: sszData,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if denebBlk == nil {
|
||||||
|
return nil, errNilBlock
|
||||||
|
}
|
||||||
|
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not convert signed beacon block")
|
||||||
|
}
|
||||||
|
root, err := blk.Block().HashTreeRoot()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not get block root")
|
||||||
|
}
|
||||||
|
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||||
|
}
|
||||||
|
sig := blk.Signature()
|
||||||
|
data := ðpbv2.SignedBeaconBlockDeneb{
|
||||||
|
Message: v2Blk,
|
||||||
|
Signature: sig[:],
|
||||||
|
}
|
||||||
|
sszData, err := data.MarshalSSZ()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||||
|
}
|
||||||
|
return ðpbv2.SSZContainer{Version: ethpbv2.Version_Deneb, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (bs *Server) submitPhase0Block(ctx context.Context, phase0Blk *ethpbv1.BeaconBlock, sig []byte) error {
|
func (bs *Server) submitPhase0Block(ctx context.Context, phase0Blk *ethpbv1.BeaconBlock, sig []byte) error {
|
||||||
v1alpha1Blk, err := migration.V1ToV1Alpha1SignedBlock(ðpbv1.SignedBeaconBlock{Block: phase0Blk, Signature: sig})
|
v1alpha1Blk, err := migration.V1ToV1Alpha1SignedBlock(ðpbv1.SignedBeaconBlock{Block: phase0Blk, Signature: sig})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -104,6 +104,8 @@ func TestGetSpec(t *testing.T) {
|
|||||||
config.MaxWithdrawalsPerPayload = 74
|
config.MaxWithdrawalsPerPayload = 74
|
||||||
config.MaxBlsToExecutionChanges = 75
|
config.MaxBlsToExecutionChanges = 75
|
||||||
config.MaxValidatorsPerWithdrawalsSweep = 76
|
config.MaxValidatorsPerWithdrawalsSweep = 76
|
||||||
|
config.DenebForkEpoch = 77
|
||||||
|
config.DenebForkVersion = []byte("DenebForkVersion")
|
||||||
|
|
||||||
var dbp [4]byte
|
var dbp [4]byte
|
||||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||||
@@ -129,6 +131,9 @@ func TestGetSpec(t *testing.T) {
|
|||||||
var dam [4]byte
|
var dam [4]byte
|
||||||
copy(dam[:], []byte{'1', '0', '0', '0'})
|
copy(dam[:], []byte{'1', '0', '0', '0'})
|
||||||
config.DomainApplicationMask = dam
|
config.DomainApplicationMask = dam
|
||||||
|
var dbs [4]byte
|
||||||
|
copy(dbs[:], []byte{'0', '0', '0', '8'})
|
||||||
|
config.DomainBlobSidecar = dbs
|
||||||
|
|
||||||
params.OverrideBeaconConfig(config)
|
params.OverrideBeaconConfig(config)
|
||||||
|
|
||||||
@@ -136,7 +141,7 @@ func TestGetSpec(t *testing.T) {
|
|||||||
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
|
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
assert.Equal(t, 105, len(resp.Data))
|
assert.Equal(t, 108, len(resp.Data))
|
||||||
for k, v := range resp.Data {
|
for k, v := range resp.Data {
|
||||||
switch k {
|
switch k {
|
||||||
case "CONFIG_NAME":
|
case "CONFIG_NAME":
|
||||||
@@ -362,8 +367,14 @@ func TestGetSpec(t *testing.T) {
|
|||||||
case "REORG_WEIGHT_THRESHOLD":
|
case "REORG_WEIGHT_THRESHOLD":
|
||||||
assert.Equal(t, "20", v)
|
assert.Equal(t, "20", v)
|
||||||
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
|
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
|
||||||
|
case "DENEB_FORK_EPOCH":
|
||||||
|
assert.Equal(t, "77", v)
|
||||||
|
case "DENEB_FORK_VERSION":
|
||||||
|
assert.Equal(t, "0x"+hex.EncodeToString([]byte("DenebForkVersion")), v)
|
||||||
|
case "DOMAIN_BLOB_SIDECAR":
|
||||||
|
assert.Equal(t, "0x30303038", v)
|
||||||
default:
|
default:
|
||||||
t.Errorf("Incorrect key: %s", k)
|
t.Errorf("Unknown key: %s", k)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -104,6 +104,19 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.BeaconState
|
|||||||
ExecutionOptimistic: isOptimistic,
|
ExecutionOptimistic: isOptimistic,
|
||||||
Finalized: isFinalized,
|
Finalized: isFinalized,
|
||||||
}, nil
|
}, nil
|
||||||
|
case version.Deneb:
|
||||||
|
protoState, err := migration.BeaconStateDenebToProto(beaconSt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, status.Errorf(codes.Internal, "Could not convert state to proto: %v", err)
|
||||||
|
}
|
||||||
|
return ðpbv2.BeaconStateResponseV2{
|
||||||
|
Version: ethpbv2.Version_Deneb,
|
||||||
|
Data: ðpbv2.BeaconStateContainer{
|
||||||
|
State: ðpbv2.BeaconStateContainer_DenebState{DenebState: protoState},
|
||||||
|
},
|
||||||
|
ExecutionOptimistic: isOptimistic,
|
||||||
|
}, nil
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return nil, status.Error(codes.Internal, "Unsupported state version")
|
return nil, status.Error(codes.Internal, "Unsupported state version")
|
||||||
}
|
}
|
||||||
@@ -133,6 +146,8 @@ func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.BeaconSt
|
|||||||
ver = ethpbv2.Version_BELLATRIX
|
ver = ethpbv2.Version_BELLATRIX
|
||||||
case version.Capella:
|
case version.Capella:
|
||||||
ver = ethpbv2.Version_CAPELLA
|
ver = ethpbv2.Version_CAPELLA
|
||||||
|
case version.Deneb:
|
||||||
|
ver = ethpbv2.Version_Deneb
|
||||||
default:
|
default:
|
||||||
return nil, status.Error(codes.Internal, "Unsupported state version")
|
return nil, status.Error(codes.Internal, "Unsupported state version")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -16,6 +16,7 @@ go_library(
|
|||||||
"//beacon-chain/db:go_default_library",
|
"//beacon-chain/db:go_default_library",
|
||||||
"//beacon-chain/db/kv:go_default_library",
|
"//beacon-chain/db/kv:go_default_library",
|
||||||
"//beacon-chain/operations/attestations:go_default_library",
|
"//beacon-chain/operations/attestations:go_default_library",
|
||||||
|
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||||
"//beacon-chain/p2p:go_default_library",
|
"//beacon-chain/p2p:go_default_library",
|
||||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import (
|
|||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
|
||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
|
||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||||
v1alpha1validator "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
|
v1alpha1validator "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
|
||||||
@@ -24,6 +25,7 @@ type Server struct {
|
|||||||
StateFetcher statefetcher.Fetcher
|
StateFetcher statefetcher.Fetcher
|
||||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||||
SyncCommitteePool synccommittee.Pool
|
SyncCommitteePool synccommittee.Pool
|
||||||
|
BLSChangesPool blstoexec.PoolManager
|
||||||
V1Alpha1Server *v1alpha1validator.Server
|
V1Alpha1Server *v1alpha1validator.Server
|
||||||
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
|
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
|
||||||
ChainInfoFetcher blockchain.ChainInfoFetcher
|
ChainInfoFetcher blockchain.ChainInfoFetcher
|
||||||
|
|||||||
@@ -122,7 +122,7 @@ func convertToBlockContainer(blk interfaces.ReadOnlySignedBeaconBlock, root [32]
|
|||||||
}
|
}
|
||||||
ctr.Block = ðpb.BeaconBlockContainer_BellatrixBlock{BellatrixBlock: rBlk}
|
ctr.Block = ðpb.BeaconBlockContainer_BellatrixBlock{BellatrixBlock: rBlk}
|
||||||
}
|
}
|
||||||
case version.Capella:
|
case version.Capella, version.Deneb:
|
||||||
if blk.IsBlinded() {
|
if blk.IsBlinded() {
|
||||||
rBlk, err := blk.PbBlindedCapellaBlock()
|
rBlk, err := blk.PbBlindedCapellaBlock()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -45,6 +45,7 @@ go_library(
|
|||||||
"//beacon-chain/core/signing:go_default_library",
|
"//beacon-chain/core/signing:go_default_library",
|
||||||
"//beacon-chain/core/time:go_default_library",
|
"//beacon-chain/core/time:go_default_library",
|
||||||
"//beacon-chain/core/transition:go_default_library",
|
"//beacon-chain/core/transition:go_default_library",
|
||||||
|
"//beacon-chain/core/transition/interop:go_default_library",
|
||||||
"//beacon-chain/core/validators:go_default_library",
|
"//beacon-chain/core/validators:go_default_library",
|
||||||
"//beacon-chain/db:go_default_library",
|
"//beacon-chain/db:go_default_library",
|
||||||
"//beacon-chain/db/kv:go_default_library",
|
"//beacon-chain/db/kv:go_default_library",
|
||||||
@@ -85,6 +86,7 @@ go_library(
|
|||||||
"//time/slots:go_default_library",
|
"//time/slots:go_default_library",
|
||||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||||
|
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||||
"@com_github_pkg_errors//:go_default_library",
|
"@com_github_pkg_errors//:go_default_library",
|
||||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||||
|
|||||||
@@ -100,6 +100,17 @@ func sendVerifiedBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltairServe
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
b.Block = ðpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: phBlk}
|
b.Block = ðpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: phBlk}
|
||||||
|
case version.Deneb:
|
||||||
|
pb, err := data.SignedBlock.Proto()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "could not get protobuf block")
|
||||||
|
}
|
||||||
|
phBlk, ok := pb.(*ethpb.SignedBeaconBlockDeneb)
|
||||||
|
if !ok {
|
||||||
|
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockDeneb")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
b.Block = ðpb.StreamBlocksResponse_DenebBlock{DenebBlock: phBlk}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := stream.Send(b); err != nil {
|
if err := stream.Send(b); err != nil {
|
||||||
@@ -149,6 +160,8 @@ func (vs *Server) sendBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltair
|
|||||||
b.Block = ðpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: p}
|
b.Block = ðpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: p}
|
||||||
case *ethpb.SignedBeaconBlockCapella:
|
case *ethpb.SignedBeaconBlockCapella:
|
||||||
b.Block = ðpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: p}
|
b.Block = ðpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: p}
|
||||||
|
case *ethpb.SignedBeaconBlockDeneb:
|
||||||
|
b.Block = ðpb.StreamBlocksResponse_DenebBlock{DenebBlock: p}
|
||||||
default:
|
default:
|
||||||
log.Errorf("Unknown block type %T", p)
|
log.Errorf("Unknown block type %T", p)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
emptypb "github.com/golang/protobuf/ptypes/empty"
|
emptypb "github.com/golang/protobuf/ptypes/empty"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||||
@@ -17,6 +18,7 @@ import (
|
|||||||
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
|
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
|
||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition/interop"
|
||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
|
||||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||||
@@ -24,6 +26,7 @@ import (
|
|||||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"go.opencensus.io/trace"
|
"go.opencensus.io/trace"
|
||||||
@@ -120,7 +123,8 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
|||||||
vs.setSyncAggregate(ctx, sBlk)
|
vs.setSyncAggregate(ctx, sBlk)
|
||||||
|
|
||||||
// Set execution data. New in Bellatrix.
|
// Set execution data. New in Bellatrix.
|
||||||
if err := vs.setExecutionData(ctx, sBlk, head); err != nil {
|
blobs, err := vs.setExecutionData(ctx, sBlk, head)
|
||||||
|
if err != nil {
|
||||||
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
|
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -129,6 +133,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
|||||||
|
|
||||||
sr, err := vs.computeStateRoot(ctx, sBlk)
|
sr, err := vs.computeStateRoot(ctx, sBlk)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
interop.WriteBlockToDisk(sBlk, true /*failed*/)
|
||||||
return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
|
return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
|
||||||
}
|
}
|
||||||
sBlk.SetStateRoot(sr)
|
sBlk.SetStateRoot(sr)
|
||||||
@@ -137,6 +142,46 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, status.Errorf(codes.Internal, "Could not convert block to proto: %v", err)
|
return nil, status.Errorf(codes.Internal, "Could not convert block to proto: %v", err)
|
||||||
}
|
}
|
||||||
|
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().DenebForkEpoch {
|
||||||
|
blk, ok := pb.(*ethpb.BeaconBlockDeneb)
|
||||||
|
if !ok {
|
||||||
|
return nil, status.Errorf(codes.Internal, "Could not cast block to BeaconBlockDeneb")
|
||||||
|
}
|
||||||
|
br, err := blk.HashTreeRoot()
|
||||||
|
if err != nil {
|
||||||
|
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Better error handling. If something is wrong with the blob, we don't want to fail block production. Also should check if the kzg commitment matches.
|
||||||
|
validatorBlobs := make([]*ethpb.BlobSidecar, len(blk.Body.BlobKzgCommitments))
|
||||||
|
var gethBlobs types.Blobs
|
||||||
|
for _, b := range blobs {
|
||||||
|
var gethBlob types.Blob
|
||||||
|
copy(gethBlob[:], b.Data)
|
||||||
|
gethBlobs = append(gethBlobs, gethBlob)
|
||||||
|
}
|
||||||
|
commitments, _, proofs, err := gethBlobs.ComputeCommitmentsAndProofs()
|
||||||
|
if err != nil {
|
||||||
|
return nil, status.Errorf(codes.Internal, "Could not compute commitments and proofs: %v", err)
|
||||||
|
}
|
||||||
|
for i, b := range blobs {
|
||||||
|
validatorBlobs[i] = ðpb.BlobSidecar{
|
||||||
|
BlockRoot: br[:],
|
||||||
|
Index: uint64(i),
|
||||||
|
Slot: blk.Slot,
|
||||||
|
BlockParentRoot: blk.ParentRoot,
|
||||||
|
ProposerIndex: blk.ProposerIndex,
|
||||||
|
Blob: b,
|
||||||
|
KzgCommitment: commitments[i][:],
|
||||||
|
KzgProof: proofs[i][:],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
blkAndBlobs := ðpb.BeaconBlockDenebAndBlobs{
|
||||||
|
Block: blk,
|
||||||
|
Blobs: validatorBlobs,
|
||||||
|
}
|
||||||
|
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Deneb{Deneb: blkAndBlobs}}, nil
|
||||||
|
}
|
||||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().CapellaForkEpoch {
|
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().CapellaForkEpoch {
|
||||||
if sBlk.IsBlinded() {
|
if sBlk.IsBlinded() {
|
||||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedCapella{BlindedCapella: pb.(*ethpb.BlindedBeaconBlockCapella)}}, nil
|
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedCapella{BlindedCapella: pb.(*ethpb.BlindedBeaconBlockCapella)}}, nil
|
||||||
@@ -160,11 +205,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
|||||||
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
|
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
blk, err := blocks.NewSignedBeaconBlock(req.Block)
|
return vs.proposeGenericBeaconBlock(ctx, req)
|
||||||
if err != nil {
|
|
||||||
return nil, status.Errorf(codes.InvalidArgument, "Could not decode block: %v", err)
|
|
||||||
}
|
|
||||||
return vs.proposeGenericBeaconBlock(ctx, blk)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
|
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
|
||||||
@@ -247,9 +288,15 @@ func (vs *Server) GetFeeRecipientByPubKey(ctx context.Context, request *ethpb.Fe
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.proposeGenericBeaconBlock")
|
ctx, span := trace.StartSpan(ctx, "ProposerServer.proposeGenericBeaconBlock")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
|
blk, err := blocks.NewSignedBeaconBlock(req.Block)
|
||||||
|
if err != nil {
|
||||||
|
return nil, status.Errorf(codes.InvalidArgument, "Could not decode block: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
root, err := blk.Block().HashTreeRoot()
|
root, err := blk.Block().HashTreeRoot()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("could not tree hash block: %v", err)
|
return nil, fmt.Errorf("could not tree hash block: %v", err)
|
||||||
@@ -267,16 +314,6 @@ func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Do not block proposal critical path with debug logging or block feed updates.
|
|
||||||
defer func() {
|
|
||||||
log.WithField("blockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debugf(
|
|
||||||
"Block proposal received via RPC")
|
|
||||||
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
|
|
||||||
Type: blockfeed.ReceivedBlock,
|
|
||||||
Data: &blockfeed.ReceivedBlockData{SignedBlock: blk},
|
|
||||||
})
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Broadcast the new block to the network.
|
// Broadcast the new block to the network.
|
||||||
blkPb, err := blk.Proto()
|
blkPb, err := blk.Proto()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -293,6 +330,28 @@ func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.
|
|||||||
return nil, fmt.Errorf("could not process beacon block: %v", err)
|
return nil, fmt.Errorf("could not process beacon block: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if blk.Version() >= version.Deneb {
|
||||||
|
b, ok := req.GetBlock().(*ethpb.GenericSignedBeaconBlock_Deneb)
|
||||||
|
if !ok {
|
||||||
|
return nil, status.Error(codes.Internal, "Could not cast block to Deneb")
|
||||||
|
}
|
||||||
|
for _, sidecar := range b.Deneb.Blobs {
|
||||||
|
if err := vs.P2P.BroadcastBlob(ctx, sidecar.Message.Index, sidecar); err != nil {
|
||||||
|
return nil, errors.Wrap(err, "could not broadcast blob sidecar")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do not block proposal critical path with debug logging or block feed updates.
|
||||||
|
defer func() {
|
||||||
|
log.WithField("blockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debugf(
|
||||||
|
"Block proposal received via RPC")
|
||||||
|
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
|
||||||
|
Type: blockfeed.ReceivedBlock,
|
||||||
|
Data: &blockfeed.ReceivedBlockData{SignedBlock: blk},
|
||||||
|
})
|
||||||
|
}()
|
||||||
|
|
||||||
return ðpb.ProposeResponse{
|
return ðpb.ProposeResponse{
|
||||||
BlockRoot: root[:],
|
BlockRoot: root[:],
|
||||||
}, nil
|
}, nil
|
||||||
|
|||||||
@@ -80,7 +80,7 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
|
|||||||
// filter separates attestation list into two groups: valid and invalid attestations.
|
// filter separates attestation list into two groups: valid and invalid attestations.
|
||||||
// The first group passes the all the required checks for attestation to be considered for proposing.
|
// The first group passes the all the required checks for attestation to be considered for proposing.
|
||||||
// And attestations from the second group should be deleted.
|
// And attestations from the second group should be deleted.
|
||||||
func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (proposerAtts, proposerAtts) {
|
func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (proposerAtts, proposerAtts, error) {
|
||||||
validAtts := make([]*ethpb.Attestation, 0, len(a))
|
validAtts := make([]*ethpb.Attestation, 0, len(a))
|
||||||
invalidAtts := make([]*ethpb.Attestation, 0, len(a))
|
invalidAtts := make([]*ethpb.Attestation, 0, len(a))
|
||||||
var attestationProcessor func(context.Context, state.BeaconState, *ethpb.Attestation) (state.BeaconState, error)
|
var attestationProcessor func(context.Context, state.BeaconState, *ethpb.Attestation) (state.BeaconState, error)
|
||||||
@@ -98,7 +98,7 @@ func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (propose
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Exit early if there is an unknown state type.
|
// Exit early if there is an unknown state type.
|
||||||
return validAtts, invalidAtts
|
return validAtts, invalidAtts, errors.Errorf("unknown state type: %v", st.Version())
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, att := range a {
|
for _, att := range a {
|
||||||
@@ -108,7 +108,7 @@ func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (propose
|
|||||||
}
|
}
|
||||||
invalidAtts = append(invalidAtts, att)
|
invalidAtts = append(invalidAtts, att)
|
||||||
}
|
}
|
||||||
return validAtts, invalidAtts
|
return validAtts, invalidAtts, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// sortByProfitability orders attestations by highest slot and by highest aggregation bit count.
|
// sortByProfitability orders attestations by highest slot and by highest aggregation bit count.
|
||||||
@@ -247,7 +247,10 @@ func (vs *Server) validateAndDeleteAttsInPool(ctx context.Context, st state.Beac
|
|||||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.validateAndDeleteAttsInPool")
|
ctx, span := trace.StartSpan(ctx, "ProposerServer.validateAndDeleteAttsInPool")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
validAtts, invalidAtts := proposerAtts(atts).filter(ctx, st)
|
validAtts, invalidAtts, err := proposerAtts(atts).filter(ctx, st)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
if err := vs.deleteAttsInPool(ctx, invalidAtts); err != nil {
|
if err := vs.deleteAttsInPool(ctx, invalidAtts); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -37,11 +37,11 @@ var builderGetPayloadMissCount = promauto.NewCounter(prometheus.CounterOpts{
|
|||||||
const blockBuilderTimeout = 1 * time.Second
|
const blockBuilderTimeout = 1 * time.Second
|
||||||
|
|
||||||
// Sets the execution data for the block. Execution data can come from local EL client or remote builder depends on validator registration and circuit breaker conditions.
|
// Sets the execution data for the block. Execution data can come from local EL client or remote builder depends on validator registration and circuit breaker conditions.
|
||||||
func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, headState state.BeaconState) error {
|
func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, headState state.BeaconState) ([]*enginev1.Blob, error) {
|
||||||
idx := blk.Block().ProposerIndex()
|
idx := blk.Block().ProposerIndex()
|
||||||
slot := blk.Block().Slot()
|
slot := blk.Block().Slot()
|
||||||
if slots.ToEpoch(slot) < params.BeaconConfig().BellatrixForkEpoch {
|
if slots.ToEpoch(slot) < params.BeaconConfig().BellatrixForkEpoch {
|
||||||
return nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
canUseBuilder, err := vs.canUseBuilder(ctx, slot, idx)
|
canUseBuilder, err := vs.canUseBuilder(ctx, slot, idx)
|
||||||
@@ -55,14 +55,14 @@ func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBea
|
|||||||
} else {
|
} else {
|
||||||
switch {
|
switch {
|
||||||
case blk.Version() >= version.Capella:
|
case blk.Version() >= version.Capella:
|
||||||
localPayload, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
|
localPayload, _, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "failed to get execution payload")
|
return nil, errors.Wrap(err, "failed to get execution payload")
|
||||||
}
|
}
|
||||||
// Compare payload values between local and builder. Default to the local value if it is higher.
|
// Compare payload values between local and builder. Default to the local value if it is higher.
|
||||||
localValue, err := localPayload.Value()
|
localValue, err := localPayload.Value()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "failed to get local payload value")
|
return nil, errors.Wrap(err, "failed to get local payload value")
|
||||||
}
|
}
|
||||||
builderValue, err := builderPayload.Value()
|
builderValue, err := builderPayload.Value()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -71,7 +71,7 @@ func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBea
|
|||||||
|
|
||||||
withdrawalsMatched, err := matchingWithdrawalsRoot(localPayload, builderPayload)
|
withdrawalsMatched, err := matchingWithdrawalsRoot(localPayload, builderPayload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "failed to match withdrawals root")
|
return nil, errors.Wrap(err, "failed to match withdrawals root")
|
||||||
}
|
}
|
||||||
// If we can't get the builder value, just use local block.
|
// If we can't get the builder value, just use local block.
|
||||||
if builderValue.Cmp(localValue) > 0 && withdrawalsMatched { // Builder value is higher and withdrawals match.
|
if builderValue.Cmp(localValue) > 0 && withdrawalsMatched { // Builder value is higher and withdrawals match.
|
||||||
@@ -79,31 +79,38 @@ func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBea
|
|||||||
if err := blk.SetExecution(builderPayload); err != nil {
|
if err := blk.SetExecution(builderPayload); err != nil {
|
||||||
log.WithError(err).Warn("Proposer: failed to set builder payload")
|
log.WithError(err).Warn("Proposer: failed to set builder payload")
|
||||||
} else {
|
} else {
|
||||||
return nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
log.WithFields(logrus.Fields{
|
log.WithFields(logrus.Fields{
|
||||||
"localValue": localValue,
|
"localValue": localValue,
|
||||||
"builderValue": builderValue,
|
"builderValue": builderValue,
|
||||||
}).Warn("Proposer: using local execution payload because higher value")
|
}).Warn("Proposer: using local execution payload because higher value")
|
||||||
return blk.SetExecution(localPayload)
|
return nil, blk.SetExecution(localPayload)
|
||||||
default: // Bellatrix case.
|
default: // Bellatrix case.
|
||||||
blk.SetBlinded(true)
|
blk.SetBlinded(true)
|
||||||
if err := blk.SetExecution(builderPayload); err != nil {
|
if err := blk.SetExecution(builderPayload); err != nil {
|
||||||
log.WithError(err).Warn("Proposer: failed to set builder payload")
|
log.WithError(err).Warn("Proposer: failed to set builder payload")
|
||||||
} else {
|
} else {
|
||||||
return nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
executionData, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
|
executionData, blobsBundle, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "failed to get execution payload")
|
return nil, errors.Wrap(err, "failed to get execution payload")
|
||||||
}
|
}
|
||||||
return blk.SetExecution(executionData)
|
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch && len(blobsBundle.KzgCommitments) > 0 {
|
||||||
|
// TODO: check block hash matches blob bundle hash
|
||||||
|
if err := blk.SetBlobKzgCommitments(blobsBundle.KzgCommitments); err != nil {
|
||||||
|
return nil, errors.Wrap(err, "could not set blob kzg commitments")
|
||||||
|
}
|
||||||
|
return blobsBundle.Blobs, blk.SetExecution(executionData)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, blk.SetExecution(executionData)
|
||||||
}
|
}
|
||||||
|
|
||||||
// This function retrieves the payload header given the slot number and the validator index.
|
// This function retrieves the payload header given the slot number and the validator index.
|
||||||
@@ -187,17 +194,17 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot primitiv
|
|||||||
"builderPubKey": fmt.Sprintf("%#x", bid.Pubkey()),
|
"builderPubKey": fmt.Sprintf("%#x", bid.Pubkey()),
|
||||||
"blockHash": fmt.Sprintf("%#x", header.BlockHash()),
|
"blockHash": fmt.Sprintf("%#x", header.BlockHash()),
|
||||||
}).Info("Received header with bid")
|
}).Info("Received header with bid")
|
||||||
|
|
||||||
return header, nil
|
return header, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// This function retrieves the full payload block using the input blind block. This input must be versioned as
|
// This function retrieves the full payload block using the input blind block. This input must be versioned as
|
||||||
// bellatrix blind block. The output block will contain the full payload. The original header block
|
// bellatrix blind block. The output block will contain the full payload. The original header block
|
||||||
// will be returned the block builder is not configured.
|
// will be returned the block builder is not configured.
|
||||||
func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
|
||||||
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
|
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// No-op if the input block is not version blind and bellatrix.
|
// No-op if the input block is not version blind and bellatrix.
|
||||||
if b.Version() != version.Bellatrix || !b.IsBlinded() {
|
if b.Version() != version.Bellatrix || !b.IsBlinded() {
|
||||||
return b, nil
|
return b, nil
|
||||||
|
|||||||
@@ -70,7 +70,8 @@ func TestServer_setExecutionData(t *testing.T) {
|
|||||||
t.Run("No builder configured. Use local block", func(t *testing.T) {
|
t.Run("No builder configured. Use local block", func(t *testing.T) {
|
||||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
|
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
|
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
|
||||||
|
require.NoError(t, err)
|
||||||
e, err := blk.Block().Body().Execution()
|
e, err := blk.Block().Body().Execution()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, uint64(1), e.BlockNumber()) // Local block
|
require.Equal(t, uint64(1), e.BlockNumber()) // Local block
|
||||||
@@ -121,7 +122,8 @@ func TestServer_setExecutionData(t *testing.T) {
|
|||||||
vs.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
|
vs.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
|
||||||
vs.TimeFetcher = chain
|
vs.TimeFetcher = chain
|
||||||
vs.HeadFetcher = chain
|
vs.HeadFetcher = chain
|
||||||
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
|
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
|
||||||
|
require.NoError(t, err)
|
||||||
e, err := blk.Block().Body().Execution()
|
e, err := blk.Block().Body().Execution()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, uint64(1), e.BlockNumber()) // Local block because incorrect withdrawals
|
require.Equal(t, uint64(1), e.BlockNumber()) // Local block because incorrect withdrawals
|
||||||
@@ -174,7 +176,8 @@ func TestServer_setExecutionData(t *testing.T) {
|
|||||||
vs.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
|
vs.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
|
||||||
vs.TimeFetcher = chain
|
vs.TimeFetcher = chain
|
||||||
vs.HeadFetcher = chain
|
vs.HeadFetcher = chain
|
||||||
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
|
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
|
||||||
|
require.NoError(t, err)
|
||||||
e, err := blk.Block().Body().Execution()
|
e, err := blk.Block().Body().Execution()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, uint64(2), e.BlockNumber()) // Builder block
|
require.Equal(t, uint64(2), e.BlockNumber()) // Builder block
|
||||||
@@ -183,7 +186,8 @@ func TestServer_setExecutionData(t *testing.T) {
|
|||||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
|
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: big.NewInt(3)}
|
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: big.NewInt(3)}
|
||||||
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
|
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
|
||||||
|
require.NoError(t, err)
|
||||||
e, err := blk.Block().Body().Execution()
|
e, err := blk.Block().Body().Execution()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, uint64(3), e.BlockNumber()) // Local block
|
require.Equal(t, uint64(3), e.BlockNumber()) // Local block
|
||||||
@@ -195,7 +199,8 @@ func TestServer_setExecutionData(t *testing.T) {
|
|||||||
ErrGetHeader: errors.New("fault"),
|
ErrGetHeader: errors.New("fault"),
|
||||||
}
|
}
|
||||||
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 4}, BlockValue: big.NewInt(0)}
|
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 4}, BlockValue: big.NewInt(0)}
|
||||||
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
|
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
|
||||||
|
require.NoError(t, err)
|
||||||
e, err := blk.Block().Body().Execution()
|
e, err := blk.Block().Body().Execution()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, uint64(4), e.BlockNumber()) // Local block
|
require.Equal(t, uint64(4), e.BlockNumber()) // Local block
|
||||||
@@ -366,10 +371,10 @@ func TestServer_getBuilderBlock(t *testing.T) {
|
|||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
blk interfaces.ReadOnlySignedBeaconBlock
|
blk interfaces.SignedBeaconBlock
|
||||||
mock *builderTest.MockBuilderService
|
mock *builderTest.MockBuilderService
|
||||||
err string
|
err string
|
||||||
returnedBlk interfaces.ReadOnlySignedBeaconBlock
|
returnedBlk interfaces.SignedBeaconBlock
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "nil block",
|
name: "nil block",
|
||||||
@@ -378,12 +383,12 @@ func TestServer_getBuilderBlock(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "old block version",
|
name: "old block version",
|
||||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
blk: func() interfaces.SignedBeaconBlock {
|
||||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
return wb
|
return wb
|
||||||
}(),
|
}(),
|
||||||
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
|
returnedBlk: func() interfaces.SignedBeaconBlock {
|
||||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
return wb
|
return wb
|
||||||
@@ -391,7 +396,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "not configured",
|
name: "not configured",
|
||||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
blk: func() interfaces.SignedBeaconBlock {
|
||||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
|
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
return wb
|
return wb
|
||||||
@@ -399,7 +404,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
|
|||||||
mock: &builderTest.MockBuilderService{
|
mock: &builderTest.MockBuilderService{
|
||||||
HasConfigured: false,
|
HasConfigured: false,
|
||||||
},
|
},
|
||||||
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
|
returnedBlk: func() interfaces.SignedBeaconBlock {
|
||||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
|
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
return wb
|
return wb
|
||||||
@@ -407,7 +412,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "submit blind block error",
|
name: "submit blind block error",
|
||||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
blk: func() interfaces.SignedBeaconBlock {
|
||||||
b := util.NewBlindedBeaconBlockBellatrix()
|
b := util.NewBlindedBeaconBlockBellatrix()
|
||||||
b.Block.Slot = 1
|
b.Block.Slot = 1
|
||||||
b.Block.ProposerIndex = 2
|
b.Block.ProposerIndex = 2
|
||||||
@@ -424,7 +429,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "head and payload root mismatch",
|
name: "head and payload root mismatch",
|
||||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
blk: func() interfaces.SignedBeaconBlock {
|
||||||
b := util.NewBlindedBeaconBlockBellatrix()
|
b := util.NewBlindedBeaconBlockBellatrix()
|
||||||
b.Block.Slot = 1
|
b.Block.Slot = 1
|
||||||
b.Block.ProposerIndex = 2
|
b.Block.ProposerIndex = 2
|
||||||
@@ -436,7 +441,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
|
|||||||
HasConfigured: true,
|
HasConfigured: true,
|
||||||
Payload: p,
|
Payload: p,
|
||||||
},
|
},
|
||||||
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
|
returnedBlk: func() interfaces.SignedBeaconBlock {
|
||||||
b := util.NewBeaconBlockBellatrix()
|
b := util.NewBeaconBlockBellatrix()
|
||||||
b.Block.Slot = 1
|
b.Block.Slot = 1
|
||||||
b.Block.ProposerIndex = 2
|
b.Block.ProposerIndex = 2
|
||||||
@@ -449,7 +454,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "can get payload",
|
name: "can get payload",
|
||||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
blk: func() interfaces.SignedBeaconBlock {
|
||||||
b := util.NewBlindedBeaconBlockBellatrix()
|
b := util.NewBlindedBeaconBlockBellatrix()
|
||||||
b.Block.Slot = 1
|
b.Block.Slot = 1
|
||||||
b.Block.ProposerIndex = 2
|
b.Block.ProposerIndex = 2
|
||||||
@@ -475,7 +480,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
|
|||||||
HasConfigured: true,
|
HasConfigured: true,
|
||||||
Payload: p,
|
Payload: p,
|
||||||
},
|
},
|
||||||
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
|
returnedBlk: func() interfaces.SignedBeaconBlock {
|
||||||
b := util.NewBeaconBlockBellatrix()
|
b := util.NewBeaconBlockBellatrix()
|
||||||
b.Block.Slot = 1
|
b.Block.Slot = 1
|
||||||
b.Block.ProposerIndex = 2
|
b.Block.ProposerIndex = 2
|
||||||
|
|||||||
@@ -35,7 +35,7 @@ func (vs *Server) setBlsToExecData(blk interfaces.SignedBeaconBlock, headState s
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (vs *Server) unblindBuilderBlockCapella(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
func (vs *Server) unblindBuilderBlockCapella(ctx context.Context, b interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
|
||||||
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
|
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
|
||||||
return nil, errors.Wrap(err, "block is nil")
|
return nil, errors.Wrap(err, "block is nil")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -21,10 +21,10 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
|
|||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
blk interfaces.ReadOnlySignedBeaconBlock
|
blk interfaces.SignedBeaconBlock
|
||||||
mock *builderTest.MockBuilderService
|
mock *builderTest.MockBuilderService
|
||||||
err string
|
err string
|
||||||
returnedBlk interfaces.ReadOnlySignedBeaconBlock
|
returnedBlk interfaces.SignedBeaconBlock
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "nil block",
|
name: "nil block",
|
||||||
@@ -33,12 +33,12 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "old block version",
|
name: "old block version",
|
||||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
blk: func() interfaces.SignedBeaconBlock {
|
||||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
return wb
|
return wb
|
||||||
}(),
|
}(),
|
||||||
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
|
returnedBlk: func() interfaces.SignedBeaconBlock {
|
||||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
return wb
|
return wb
|
||||||
@@ -46,7 +46,7 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "not configured",
|
name: "not configured",
|
||||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
blk: func() interfaces.SignedBeaconBlock {
|
||||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
|
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
return wb
|
return wb
|
||||||
@@ -54,7 +54,7 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
|
|||||||
mock: &builderTest.MockBuilderService{
|
mock: &builderTest.MockBuilderService{
|
||||||
HasConfigured: false,
|
HasConfigured: false,
|
||||||
},
|
},
|
||||||
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
|
returnedBlk: func() interfaces.SignedBeaconBlock {
|
||||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
|
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
return wb
|
return wb
|
||||||
@@ -62,7 +62,7 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "submit blind block error",
|
name: "submit blind block error",
|
||||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
blk: func() interfaces.SignedBeaconBlock {
|
||||||
b := util.NewBlindedBeaconBlockCapella()
|
b := util.NewBlindedBeaconBlockCapella()
|
||||||
b.Block.Slot = 1
|
b.Block.Slot = 1
|
||||||
b.Block.ProposerIndex = 2
|
b.Block.ProposerIndex = 2
|
||||||
@@ -79,7 +79,7 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "can get payload",
|
name: "can get payload",
|
||||||
blk: func() interfaces.ReadOnlySignedBeaconBlock {
|
blk: func() interfaces.SignedBeaconBlock {
|
||||||
b := util.NewBlindedBeaconBlockCapella()
|
b := util.NewBlindedBeaconBlockCapella()
|
||||||
b.Block.Slot = 1
|
b.Block.Slot = 1
|
||||||
b.Block.ProposerIndex = 2
|
b.Block.ProposerIndex = 2
|
||||||
@@ -108,7 +108,7 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
|
|||||||
HasConfigured: true,
|
HasConfigured: true,
|
||||||
PayloadCapella: p,
|
PayloadCapella: p,
|
||||||
},
|
},
|
||||||
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
|
returnedBlk: func() interfaces.SignedBeaconBlock {
|
||||||
b := util.NewBeaconBlockCapella()
|
b := util.NewBeaconBlockCapella()
|
||||||
b.Block.Slot = 1
|
b.Block.Slot = 1
|
||||||
b.Block.ProposerIndex = 2
|
b.Block.ProposerIndex = 2
|
||||||
|
|||||||
@@ -30,11 +30,16 @@ func getEmptyBlock(slot primitives.Slot) (interfaces.SignedBeaconBlock, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
|
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
|
||||||
}
|
}
|
||||||
default:
|
case slots.ToEpoch(slot) < params.BeaconConfig().DenebForkEpoch:
|
||||||
sBlk, err = blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockCapella{Block: ðpb.BeaconBlockCapella{Body: ðpb.BeaconBlockBodyCapella{}}})
|
sBlk, err = blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockCapella{Block: ðpb.BeaconBlockCapella{Body: ðpb.BeaconBlockBodyCapella{}}})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
|
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
|
||||||
}
|
}
|
||||||
|
default:
|
||||||
|
sBlk, err = blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockDeneb{Block: ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{}}})
|
||||||
|
if err != nil {
|
||||||
|
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return sBlk, err
|
return sBlk, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -40,9 +40,10 @@ var (
|
|||||||
})
|
})
|
||||||
)
|
)
|
||||||
|
|
||||||
// This returns the execution payload of a given slot. The function has full awareness of pre and post merge.
|
// This returns the execution payload of a given slot.
|
||||||
|
// The function has full awareness of pre and post merge.
|
||||||
// The payload is computed given the respected time of merge.
|
// The payload is computed given the respected time of merge.
|
||||||
func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot, vIdx primitives.ValidatorIndex, headRoot [32]byte, st state.BeaconState) (interfaces.ExecutionData, error) {
|
func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot, vIdx primitives.ValidatorIndex, headRoot [32]byte, st state.BeaconState) (interfaces.ExecutionData, *enginev1.BlobsBundle, error) {
|
||||||
proposerID, payloadId, ok := vs.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, headRoot)
|
proposerID, payloadId, ok := vs.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, headRoot)
|
||||||
feeRecipient := params.BeaconConfig().DefaultFeeRecipient
|
feeRecipient := params.BeaconConfig().DefaultFeeRecipient
|
||||||
recipient, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, vIdx)
|
recipient, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, vIdx)
|
||||||
@@ -62,7 +63,7 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot,
|
|||||||
"Please refer to our documentation for instructions")
|
"Please refer to our documentation for instructions")
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
return nil, errors.Wrap(err, "could not get fee recipient in db")
|
return nil, nil, errors.Wrap(err, "could not get fee recipient in db")
|
||||||
}
|
}
|
||||||
|
|
||||||
if ok && proposerID == vIdx && payloadId != [8]byte{} { // Payload ID is cache hit. Return the cached payload ID.
|
if ok && proposerID == vIdx && payloadId != [8]byte{} { // Payload ID is cache hit. Return the cached payload ID.
|
||||||
@@ -73,10 +74,17 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot,
|
|||||||
switch {
|
switch {
|
||||||
case err == nil:
|
case err == nil:
|
||||||
warnIfFeeRecipientDiffers(payload, feeRecipient)
|
warnIfFeeRecipientDiffers(payload, feeRecipient)
|
||||||
return payload, nil
|
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
|
||||||
|
sc, err := vs.ExecutionEngineCaller.GetBlobsBundle(ctx, pid)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, errors.Wrap(err, "could not get blobs bundle from execution client")
|
||||||
|
}
|
||||||
|
return payload, sc, nil
|
||||||
|
}
|
||||||
|
return payload, nil, nil
|
||||||
case errors.Is(err, context.DeadlineExceeded):
|
case errors.Is(err, context.DeadlineExceeded):
|
||||||
default:
|
default:
|
||||||
return nil, errors.Wrap(err, "could not get cached payload from execution client")
|
return nil, nil, errors.Wrap(err, "could not get cached payload from execution client")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -84,53 +92,61 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot,
|
|||||||
var hasTerminalBlock bool
|
var hasTerminalBlock bool
|
||||||
mergeComplete, err := blocks.IsMergeTransitionComplete(st)
|
mergeComplete, err := blocks.IsMergeTransitionComplete(st)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
t, err := slots.ToTime(st.GenesisTime(), slot)
|
t, err := slots.ToTime(st.GenesisTime(), slot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
if mergeComplete {
|
if mergeComplete {
|
||||||
header, err := st.LatestExecutionPayloadHeader()
|
header, err := st.LatestExecutionPayloadHeader()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
parentHash = header.BlockHash()
|
parentHash = header.BlockHash()
|
||||||
} else {
|
} else {
|
||||||
if activationEpochNotReached(slot) {
|
if activationEpochNotReached(slot) {
|
||||||
return consensusblocks.WrappedExecutionPayload(emptyPayload())
|
p, err := consensusblocks.WrappedExecutionPayload(emptyPayload())
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return p, nil, nil
|
||||||
}
|
}
|
||||||
parentHash, hasTerminalBlock, err = vs.getTerminalBlockHashIfExists(ctx, uint64(t.Unix()))
|
parentHash, hasTerminalBlock, err = vs.getTerminalBlockHashIfExists(ctx, uint64(t.Unix()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
if !hasTerminalBlock {
|
if !hasTerminalBlock {
|
||||||
return consensusblocks.WrappedExecutionPayload(emptyPayload())
|
p, err := consensusblocks.WrappedExecutionPayload(emptyPayload())
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return p, nil, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
payloadIDCacheMiss.Inc()
|
payloadIDCacheMiss.Inc()
|
||||||
|
|
||||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
finalizedBlockHash := params.BeaconConfig().ZeroHash[:]
|
finalizedBlockHash := params.BeaconConfig().ZeroHash[:]
|
||||||
finalizedRoot := bytesutil.ToBytes32(st.FinalizedCheckpoint().Root)
|
finalizedRoot := bytesutil.ToBytes32(st.FinalizedCheckpoint().Root)
|
||||||
if finalizedRoot != [32]byte{} { // finalized root could be zeros before the first finalized block.
|
if finalizedRoot != [32]byte{} { // finalized root could be zeros before the first finalized block.
|
||||||
finalizedBlock, err := vs.BeaconDB.Block(ctx, bytesutil.ToBytes32(st.FinalizedCheckpoint().Root))
|
finalizedBlock, err := vs.BeaconDB.Block(ctx, bytesutil.ToBytes32(st.FinalizedCheckpoint().Root))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
if err := consensusblocks.BeaconBlockIsNil(finalizedBlock); err != nil {
|
if err := consensusblocks.BeaconBlockIsNil(finalizedBlock); err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
switch finalizedBlock.Version() {
|
switch finalizedBlock.Version() {
|
||||||
case version.Phase0, version.Altair: // Blocks before Bellatrix don't have execution payloads. Use zeros as the hash.
|
case version.Phase0, version.Altair: // Blocks before Bellatrix don't have execution payloads. Use zeros as the hash.
|
||||||
default:
|
default:
|
||||||
finalizedPayload, err := finalizedBlock.Block().Body().Execution()
|
finalizedPayload, err := finalizedBlock.Block().Body().Execution()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
finalizedBlockHash = finalizedPayload.BlockHash()
|
finalizedBlockHash = finalizedPayload.BlockHash()
|
||||||
}
|
}
|
||||||
@@ -143,10 +159,10 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot,
|
|||||||
}
|
}
|
||||||
var attr payloadattribute.Attributer
|
var attr payloadattribute.Attributer
|
||||||
switch st.Version() {
|
switch st.Version() {
|
||||||
case version.Capella:
|
case version.Capella, version.Deneb:
|
||||||
withdrawals, err := st.ExpectedWithdrawals()
|
withdrawals, err := st.ExpectedWithdrawals()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
|
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
|
||||||
Timestamp: uint64(t.Unix()),
|
Timestamp: uint64(t.Unix()),
|
||||||
@@ -155,7 +171,7 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot,
|
|||||||
Withdrawals: withdrawals,
|
Withdrawals: withdrawals,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
case version.Bellatrix:
|
case version.Bellatrix:
|
||||||
attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
|
attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
|
||||||
@@ -164,25 +180,32 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot,
|
|||||||
SuggestedFeeRecipient: feeRecipient.Bytes(),
|
SuggestedFeeRecipient: feeRecipient.Bytes(),
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
return nil, errors.New("unknown beacon state version")
|
return nil, nil, errors.New("unknown beacon state version")
|
||||||
}
|
}
|
||||||
|
|
||||||
payloadID, _, err := vs.ExecutionEngineCaller.ForkchoiceUpdated(ctx, f, attr)
|
payloadID, _, err := vs.ExecutionEngineCaller.ForkchoiceUpdated(ctx, f, attr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "could not prepare payload")
|
return nil, nil, errors.Wrap(err, "could not prepare payload")
|
||||||
}
|
}
|
||||||
if payloadID == nil {
|
if payloadID == nil {
|
||||||
return nil, fmt.Errorf("nil payload with block hash: %#x", parentHash)
|
return nil, nil, fmt.Errorf("nil payload with block hash: %#x", parentHash)
|
||||||
}
|
}
|
||||||
payload, err := vs.ExecutionEngineCaller.GetPayload(ctx, *payloadID, slot)
|
payload, err := vs.ExecutionEngineCaller.GetPayload(ctx, *payloadID, slot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
warnIfFeeRecipientDiffers(payload, feeRecipient)
|
warnIfFeeRecipientDiffers(payload, feeRecipient)
|
||||||
return payload, nil
|
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
|
||||||
|
sc, err := vs.ExecutionEngineCaller.GetBlobsBundle(ctx, *payloadID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, errors.Wrap(err, "could not get blobs bundle from execution client")
|
||||||
|
}
|
||||||
|
return payload, sc, nil
|
||||||
|
}
|
||||||
|
return payload, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// warnIfFeeRecipientDiffers logs a warning if the fee recipient in the included payload does not
|
// warnIfFeeRecipientDiffers logs a warning if the fee recipient in the included payload does not
|
||||||
|
|||||||
@@ -143,7 +143,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
|
|||||||
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
|
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
|
||||||
}
|
}
|
||||||
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(tt.st.Slot(), 100, [8]byte{100}, [32]byte{'a'})
|
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(tt.st.Slot(), 100, [8]byte{100}, [32]byte{'a'})
|
||||||
_, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx, [32]byte{'a'}, tt.st)
|
_, _, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx, [32]byte{'a'}, tt.st)
|
||||||
if tt.errString != "" {
|
if tt.errString != "" {
|
||||||
require.ErrorContains(t, tt.errString, err)
|
require.ErrorContains(t, tt.errString, err)
|
||||||
} else {
|
} else {
|
||||||
@@ -179,7 +179,7 @@ func TestServer_getExecutionPayloadContextTimeout(t *testing.T) {
|
|||||||
}
|
}
|
||||||
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nonTransitionSt.Slot(), 100, [8]byte{100}, [32]byte{'a'})
|
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nonTransitionSt.Slot(), 100, [8]byte{100}, [32]byte{'a'})
|
||||||
|
|
||||||
_, err = vs.getExecutionPayload(context.Background(), nonTransitionSt.Slot(), 100, [32]byte{'a'}, nonTransitionSt)
|
_, _, err = vs.getExecutionPayload(context.Background(), nonTransitionSt.Slot(), 100, [32]byte{'a'}, nonTransitionSt)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -224,7 +224,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
|
|||||||
BeaconDB: beaconDB,
|
BeaconDB: beaconDB,
|
||||||
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
|
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
|
||||||
}
|
}
|
||||||
gotPayload, err := vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
|
gotPayload, _, err := vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NotNil(t, gotPayload)
|
require.NotNil(t, gotPayload)
|
||||||
|
|
||||||
@@ -236,7 +236,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
|
|||||||
payload.FeeRecipient = evilRecipientAddress[:]
|
payload.FeeRecipient = evilRecipientAddress[:]
|
||||||
vs.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
|
vs.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
|
||||||
|
|
||||||
gotPayload, err = vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
|
gotPayload, _, err = vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NotNil(t, gotPayload)
|
require.NotNil(t, gotPayload)
|
||||||
|
|
||||||
|
|||||||
@@ -48,6 +48,7 @@ type BeaconState struct {
|
|||||||
nextSyncCommittee *ethpb.SyncCommittee
|
nextSyncCommittee *ethpb.SyncCommittee
|
||||||
latestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader
|
latestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader
|
||||||
latestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella
|
latestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella
|
||||||
|
latestExecutionPayloadHeaderDeneb *enginev1.ExecutionPayloadHeaderDeneb
|
||||||
nextWithdrawalIndex uint64
|
nextWithdrawalIndex uint64
|
||||||
nextWithdrawalValidatorIndex primitives.ValidatorIndex
|
nextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||||
|
|
||||||
|
|||||||
@@ -48,6 +48,7 @@ type BeaconState struct {
|
|||||||
nextSyncCommittee *ethpb.SyncCommittee
|
nextSyncCommittee *ethpb.SyncCommittee
|
||||||
latestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader
|
latestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader
|
||||||
latestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella
|
latestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella
|
||||||
|
latestExecutionPayloadHeaderDeneb *enginev1.ExecutionPayloadHeaderDeneb
|
||||||
nextWithdrawalIndex uint64
|
nextWithdrawalIndex uint64
|
||||||
nextWithdrawalValidatorIndex primitives.ValidatorIndex
|
nextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||||
|
|
||||||
|
|||||||
@@ -22,7 +22,12 @@ func (b *BeaconState) LatestExecutionPayloadHeader() (interfaces.ExecutionData,
|
|||||||
if b.version == version.Bellatrix {
|
if b.version == version.Bellatrix {
|
||||||
return blocks.WrappedExecutionPayloadHeader(b.latestExecutionPayloadHeaderVal())
|
return blocks.WrappedExecutionPayloadHeader(b.latestExecutionPayloadHeaderVal())
|
||||||
}
|
}
|
||||||
return blocks.WrappedExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapellaVal(), big.NewInt(0))
|
|
||||||
|
if b.version == version.Capella {
|
||||||
|
return blocks.WrappedExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapellaVal(), big.NewInt(0))
|
||||||
|
}
|
||||||
|
|
||||||
|
return blocks.WrappedExecutionPayloadHeaderDeneb(b.latestExecutionPayloadHeaderDenebVal(), big.NewInt(0))
|
||||||
}
|
}
|
||||||
|
|
||||||
// latestExecutionPayloadHeaderVal of the beacon state.
|
// latestExecutionPayloadHeaderVal of the beacon state.
|
||||||
@@ -36,3 +41,7 @@ func (b *BeaconState) latestExecutionPayloadHeaderVal() *enginev1.ExecutionPaylo
|
|||||||
func (b *BeaconState) latestExecutionPayloadHeaderCapellaVal() *enginev1.ExecutionPayloadHeaderCapella {
|
func (b *BeaconState) latestExecutionPayloadHeaderCapellaVal() *enginev1.ExecutionPayloadHeaderCapella {
|
||||||
return ethpb.CopyExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapella)
|
return ethpb.CopyExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapella)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (b *BeaconState) latestExecutionPayloadHeaderDenebVal() *enginev1.ExecutionPayloadHeaderDeneb {
|
||||||
|
return ethpb.CopyExecutionPayloadHeaderDeneb(b.latestExecutionPayloadHeaderDeneb)
|
||||||
|
}
|
||||||
|
|||||||
@@ -128,6 +128,37 @@ func (b *BeaconState) ToProtoUnsafe() interface{} {
|
|||||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||||
HistoricalSummaries: b.historicalSummaries,
|
HistoricalSummaries: b.historicalSummaries,
|
||||||
}
|
}
|
||||||
|
case version.Deneb:
|
||||||
|
return ðpb.BeaconStateDeneb{
|
||||||
|
GenesisTime: b.genesisTime,
|
||||||
|
GenesisValidatorsRoot: gvrCopy[:],
|
||||||
|
Slot: b.slot,
|
||||||
|
Fork: b.fork,
|
||||||
|
LatestBlockHeader: b.latestBlockHeader,
|
||||||
|
BlockRoots: b.blockRoots.Slice(),
|
||||||
|
StateRoots: b.stateRoots.Slice(),
|
||||||
|
HistoricalRoots: b.historicalRoots.Slice(),
|
||||||
|
Eth1Data: b.eth1Data,
|
||||||
|
Eth1DataVotes: b.eth1DataVotes,
|
||||||
|
Eth1DepositIndex: b.eth1DepositIndex,
|
||||||
|
Validators: b.validators,
|
||||||
|
Balances: b.balances,
|
||||||
|
RandaoMixes: b.randaoMixes.Slice(),
|
||||||
|
Slashings: b.slashings,
|
||||||
|
PreviousEpochParticipation: b.previousEpochParticipation,
|
||||||
|
CurrentEpochParticipation: b.currentEpochParticipation,
|
||||||
|
JustificationBits: b.justificationBits,
|
||||||
|
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint,
|
||||||
|
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint,
|
||||||
|
FinalizedCheckpoint: b.finalizedCheckpoint,
|
||||||
|
InactivityScores: b.inactivityScores,
|
||||||
|
CurrentSyncCommittee: b.currentSyncCommittee,
|
||||||
|
NextSyncCommittee: b.nextSyncCommittee,
|
||||||
|
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeaderDeneb,
|
||||||
|
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||||
|
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||||
|
HistoricalSummaries: b.historicalSummaries,
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -255,6 +286,37 @@ func (b *BeaconState) ToProto() interface{} {
|
|||||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||||
HistoricalSummaries: b.historicalSummariesVal(),
|
HistoricalSummaries: b.historicalSummariesVal(),
|
||||||
}
|
}
|
||||||
|
case version.Deneb:
|
||||||
|
return ðpb.BeaconStateDeneb{
|
||||||
|
GenesisTime: b.genesisTime,
|
||||||
|
GenesisValidatorsRoot: gvrCopy[:],
|
||||||
|
Slot: b.slot,
|
||||||
|
Fork: b.forkVal(),
|
||||||
|
LatestBlockHeader: b.latestBlockHeaderVal(),
|
||||||
|
BlockRoots: b.blockRoots.Slice(),
|
||||||
|
StateRoots: b.stateRoots.Slice(),
|
||||||
|
HistoricalRoots: b.historicalRoots.Slice(),
|
||||||
|
Eth1Data: b.eth1DataVal(),
|
||||||
|
Eth1DataVotes: b.eth1DataVotesVal(),
|
||||||
|
Eth1DepositIndex: b.eth1DepositIndex,
|
||||||
|
Validators: b.validatorsVal(),
|
||||||
|
Balances: b.balancesVal(),
|
||||||
|
RandaoMixes: b.randaoMixes.Slice(),
|
||||||
|
Slashings: b.slashingsVal(),
|
||||||
|
PreviousEpochParticipation: b.previousEpochParticipationVal(),
|
||||||
|
CurrentEpochParticipation: b.currentEpochParticipationVal(),
|
||||||
|
JustificationBits: b.justificationBitsVal(),
|
||||||
|
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpointVal(),
|
||||||
|
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpointVal(),
|
||||||
|
FinalizedCheckpoint: b.finalizedCheckpointVal(),
|
||||||
|
InactivityScores: b.inactivityScoresVal(),
|
||||||
|
CurrentSyncCommittee: b.currentSyncCommitteeVal(),
|
||||||
|
NextSyncCommittee: b.nextSyncCommitteeVal(),
|
||||||
|
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeaderDenebVal(),
|
||||||
|
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||||
|
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||||
|
HistoricalSummaries: b.historicalSummariesVal(),
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -338,3 +400,11 @@ func ProtobufBeaconStateCapella(s interface{}) (*ethpb.BeaconStateCapella, error
|
|||||||
}
|
}
|
||||||
return pbState, nil
|
return pbState, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ProtobufBeaconStateDeneb(s interface{}) (*ethpb.BeaconStateDeneb, error) {
|
||||||
|
pbState, ok := s.(*ethpb.BeaconStateDeneb)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("input is not type pb.BeaconStateCapella")
|
||||||
|
}
|
||||||
|
return pbState, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -33,6 +33,8 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
|
|||||||
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateBellatrixFieldCount)
|
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateBellatrixFieldCount)
|
||||||
case version.Capella:
|
case version.Capella:
|
||||||
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateCapellaFieldCount)
|
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateCapellaFieldCount)
|
||||||
|
case version.Deneb:
|
||||||
|
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateCapellaFieldCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Genesis time root.
|
// Genesis time root.
|
||||||
@@ -262,5 +264,31 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
|
|||||||
fieldRoots[types.HistoricalSummaries.RealPosition()] = historicalSummaryRoot[:]
|
fieldRoots[types.HistoricalSummaries.RealPosition()] = historicalSummaryRoot[:]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if state.version == version.Deneb {
|
||||||
|
// Execution payload root.
|
||||||
|
executionPayloadRoot, err := state.latestExecutionPayloadHeaderDeneb.HashTreeRoot()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
fieldRoots[types.LatestExecutionPayloadHeaderDeneb.RealPosition()] = executionPayloadRoot[:]
|
||||||
|
|
||||||
|
// Next withdrawal index root.
|
||||||
|
nextWithdrawalIndexRoot := make([]byte, 32)
|
||||||
|
binary.LittleEndian.PutUint64(nextWithdrawalIndexRoot, state.nextWithdrawalIndex)
|
||||||
|
fieldRoots[types.NextWithdrawalIndex.RealPosition()] = nextWithdrawalIndexRoot
|
||||||
|
|
||||||
|
// Next partial withdrawal validator index root.
|
||||||
|
nextWithdrawalValidatorIndexRoot := make([]byte, 32)
|
||||||
|
binary.LittleEndian.PutUint64(nextWithdrawalValidatorIndexRoot, uint64(state.nextWithdrawalValidatorIndex))
|
||||||
|
fieldRoots[types.NextWithdrawalValidatorIndex.RealPosition()] = nextWithdrawalValidatorIndexRoot
|
||||||
|
|
||||||
|
// Historical summary root.
|
||||||
|
historicalSummaryRoot, err := stateutil.HistoricalSummariesRoot(state.historicalSummaries)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "could not compute historical summary merkleization")
|
||||||
|
}
|
||||||
|
fieldRoots[types.HistoricalSummaries.RealPosition()] = historicalSummaryRoot[:]
|
||||||
|
}
|
||||||
|
|
||||||
return fieldRoots, nil
|
return fieldRoots, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -36,6 +36,14 @@ func (b *BeaconState) SetLatestExecutionPayloadHeader(val interfaces.ExecutionDa
|
|||||||
b.latestExecutionPayloadHeaderCapella = latest
|
b.latestExecutionPayloadHeaderCapella = latest
|
||||||
b.markFieldAsDirty(types.LatestExecutionPayloadHeaderCapella)
|
b.markFieldAsDirty(types.LatestExecutionPayloadHeaderCapella)
|
||||||
return nil
|
return nil
|
||||||
|
case *enginev1.ExecutionPayloadDeneb:
|
||||||
|
latest, err := consensusblocks.PayloadToHeaderDeneb(val)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "could not convert payload to header")
|
||||||
|
}
|
||||||
|
b.latestExecutionPayloadHeaderDeneb = latest
|
||||||
|
b.markFieldAsDirty(types.LatestExecutionPayloadHeaderDeneb)
|
||||||
|
return nil
|
||||||
case *enginev1.ExecutionPayloadHeader:
|
case *enginev1.ExecutionPayloadHeader:
|
||||||
b.latestExecutionPayloadHeader = header
|
b.latestExecutionPayloadHeader = header
|
||||||
b.markFieldAsDirty(types.LatestExecutionPayloadHeader)
|
b.markFieldAsDirty(types.LatestExecutionPayloadHeader)
|
||||||
@@ -44,6 +52,10 @@ func (b *BeaconState) SetLatestExecutionPayloadHeader(val interfaces.ExecutionDa
|
|||||||
b.latestExecutionPayloadHeaderCapella = header
|
b.latestExecutionPayloadHeaderCapella = header
|
||||||
b.markFieldAsDirty(types.LatestExecutionPayloadHeaderCapella)
|
b.markFieldAsDirty(types.LatestExecutionPayloadHeaderCapella)
|
||||||
return nil
|
return nil
|
||||||
|
case *enginev1.ExecutionPayloadHeaderDeneb:
|
||||||
|
b.latestExecutionPayloadHeaderDeneb = header
|
||||||
|
b.markFieldAsDirty(types.LatestExecutionPayloadHeaderDeneb)
|
||||||
|
return nil
|
||||||
default:
|
default:
|
||||||
return errors.New("value must be an execution payload header")
|
return errors.New("value must be an execution payload header")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ import (
|
|||||||
|
|
||||||
func (b *BeaconState) ProportionalSlashingMultiplier() (uint64, error) {
|
func (b *BeaconState) ProportionalSlashingMultiplier() (uint64, error) {
|
||||||
switch b.version {
|
switch b.version {
|
||||||
case version.Bellatrix, version.Capella:
|
case version.Bellatrix, version.Capella, version.Deneb:
|
||||||
return params.BeaconConfig().ProportionalSlashingMultiplierBellatrix, nil
|
return params.BeaconConfig().ProportionalSlashingMultiplierBellatrix, nil
|
||||||
case version.Altair:
|
case version.Altair:
|
||||||
return params.BeaconConfig().ProportionalSlashingMultiplierAltair, nil
|
return params.BeaconConfig().ProportionalSlashingMultiplierAltair, nil
|
||||||
@@ -19,7 +19,7 @@ func (b *BeaconState) ProportionalSlashingMultiplier() (uint64, error) {
|
|||||||
|
|
||||||
func (b *BeaconState) InactivityPenaltyQuotient() (uint64, error) {
|
func (b *BeaconState) InactivityPenaltyQuotient() (uint64, error) {
|
||||||
switch b.version {
|
switch b.version {
|
||||||
case version.Bellatrix, version.Capella:
|
case version.Bellatrix, version.Capella, version.Deneb:
|
||||||
return params.BeaconConfig().InactivityPenaltyQuotientBellatrix, nil
|
return params.BeaconConfig().InactivityPenaltyQuotientBellatrix, nil
|
||||||
case version.Altair:
|
case version.Altair:
|
||||||
return params.BeaconConfig().InactivityPenaltyQuotientAltair, nil
|
return params.BeaconConfig().InactivityPenaltyQuotientAltair, nil
|
||||||
|
|||||||
@@ -83,10 +83,19 @@ var capellaFields = append(
|
|||||||
types.HistoricalSummaries,
|
types.HistoricalSummaries,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var denebFields = append(
|
||||||
|
altairFields,
|
||||||
|
types.LatestExecutionPayloadHeaderDeneb,
|
||||||
|
types.NextWithdrawalIndex,
|
||||||
|
types.NextWithdrawalValidatorIndex,
|
||||||
|
types.HistoricalSummaries,
|
||||||
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
phase0SharedFieldRefCount = 10
|
phase0SharedFieldRefCount = 10
|
||||||
altairSharedFieldRefCount = 11
|
altairSharedFieldRefCount = 11
|
||||||
bellatrixSharedFieldRefCount = 12
|
bellatrixSharedFieldRefCount = 12
|
||||||
|
denebSharedFieldRefCount = 14
|
||||||
capellaSharedFieldRefCount = 14
|
capellaSharedFieldRefCount = 14
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -110,6 +119,11 @@ func InitializeFromProtoCapella(st *ethpb.BeaconStateCapella) (state.BeaconState
|
|||||||
return InitializeFromProtoUnsafeCapella(proto.Clone(st).(*ethpb.BeaconStateCapella))
|
return InitializeFromProtoUnsafeCapella(proto.Clone(st).(*ethpb.BeaconStateCapella))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// InitializeFromProtoDeneb the beacon state from a protobuf representation.
|
||||||
|
func InitializeFromProtoDeneb(st *ethpb.BeaconStateDeneb) (state.BeaconState, error) {
|
||||||
|
return InitializeFromProtoUnsafeDeneb(proto.Clone(st).(*ethpb.BeaconStateDeneb))
|
||||||
|
}
|
||||||
|
|
||||||
// InitializeFromProtoUnsafePhase0 directly uses the beacon state protobuf fields
|
// InitializeFromProtoUnsafePhase0 directly uses the beacon state protobuf fields
|
||||||
// and sets them as fields of the BeaconState type.
|
// and sets them as fields of the BeaconState type.
|
||||||
func InitializeFromProtoUnsafePhase0(st *ethpb.BeaconState) (state.BeaconState, error) {
|
func InitializeFromProtoUnsafePhase0(st *ethpb.BeaconState) (state.BeaconState, error) {
|
||||||
@@ -474,6 +488,102 @@ func InitializeFromProtoUnsafeCapella(st *ethpb.BeaconStateCapella) (state.Beaco
|
|||||||
return b, nil
|
return b, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// InitializeFromProtoUnsafeDeneb directly uses the beacon state protobuf fields
|
||||||
|
// and sets them as fields of the BeaconState type.
|
||||||
|
func InitializeFromProtoUnsafeDeneb(st *ethpb.BeaconStateDeneb) (state.BeaconState, error) {
|
||||||
|
if st == nil {
|
||||||
|
return nil, errors.New("received nil state")
|
||||||
|
}
|
||||||
|
|
||||||
|
var bRoots customtypes.BlockRoots
|
||||||
|
for i, r := range st.BlockRoots {
|
||||||
|
bRoots[i] = bytesutil.ToBytes32(r)
|
||||||
|
}
|
||||||
|
var sRoots customtypes.StateRoots
|
||||||
|
for i, r := range st.StateRoots {
|
||||||
|
sRoots[i] = bytesutil.ToBytes32(r)
|
||||||
|
}
|
||||||
|
hRoots := customtypes.HistoricalRoots(make([][32]byte, len(st.HistoricalRoots)))
|
||||||
|
for i, r := range st.HistoricalRoots {
|
||||||
|
hRoots[i] = bytesutil.ToBytes32(r)
|
||||||
|
}
|
||||||
|
var mixes customtypes.RandaoMixes
|
||||||
|
for i, m := range st.RandaoMixes {
|
||||||
|
mixes[i] = bytesutil.ToBytes32(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldCount := params.BeaconConfig().BeaconStateCapellaFieldCount
|
||||||
|
b := &BeaconState{
|
||||||
|
version: version.Deneb,
|
||||||
|
genesisTime: st.GenesisTime,
|
||||||
|
genesisValidatorsRoot: bytesutil.ToBytes32(st.GenesisValidatorsRoot),
|
||||||
|
slot: st.Slot,
|
||||||
|
fork: st.Fork,
|
||||||
|
latestBlockHeader: st.LatestBlockHeader,
|
||||||
|
blockRoots: &bRoots,
|
||||||
|
stateRoots: &sRoots,
|
||||||
|
historicalRoots: hRoots,
|
||||||
|
eth1Data: st.Eth1Data,
|
||||||
|
eth1DataVotes: st.Eth1DataVotes,
|
||||||
|
eth1DepositIndex: st.Eth1DepositIndex,
|
||||||
|
validators: st.Validators,
|
||||||
|
balances: st.Balances,
|
||||||
|
randaoMixes: &mixes,
|
||||||
|
slashings: st.Slashings,
|
||||||
|
previousEpochParticipation: st.PreviousEpochParticipation,
|
||||||
|
currentEpochParticipation: st.CurrentEpochParticipation,
|
||||||
|
justificationBits: st.JustificationBits,
|
||||||
|
previousJustifiedCheckpoint: st.PreviousJustifiedCheckpoint,
|
||||||
|
currentJustifiedCheckpoint: st.CurrentJustifiedCheckpoint,
|
||||||
|
finalizedCheckpoint: st.FinalizedCheckpoint,
|
||||||
|
inactivityScores: st.InactivityScores,
|
||||||
|
currentSyncCommittee: st.CurrentSyncCommittee,
|
||||||
|
nextSyncCommittee: st.NextSyncCommittee,
|
||||||
|
latestExecutionPayloadHeaderDeneb: st.LatestExecutionPayloadHeader,
|
||||||
|
nextWithdrawalIndex: st.NextWithdrawalIndex,
|
||||||
|
nextWithdrawalValidatorIndex: st.NextWithdrawalValidatorIndex,
|
||||||
|
historicalSummaries: st.HistoricalSummaries,
|
||||||
|
|
||||||
|
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
||||||
|
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||||
|
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
|
||||||
|
sharedFieldReferences: make(map[types.FieldIndex]*stateutil.Reference, capellaSharedFieldRefCount),
|
||||||
|
rebuildTrie: make(map[types.FieldIndex]bool, fieldCount),
|
||||||
|
valMapHandler: stateutil.NewValMapHandler(st.Validators),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, f := range denebFields {
|
||||||
|
b.dirtyFields[f] = true
|
||||||
|
b.rebuildTrie[f] = true
|
||||||
|
b.dirtyIndices[f] = []uint64{}
|
||||||
|
trie, err := fieldtrie.NewFieldTrie(f, types.BasicArray, nil, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
b.stateFieldLeaves[f] = trie
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize field reference tracking for shared data.
|
||||||
|
b.sharedFieldReferences[types.BlockRoots] = stateutil.NewRef(1)
|
||||||
|
b.sharedFieldReferences[types.StateRoots] = stateutil.NewRef(1)
|
||||||
|
b.sharedFieldReferences[types.HistoricalRoots] = stateutil.NewRef(1)
|
||||||
|
b.sharedFieldReferences[types.Eth1DataVotes] = stateutil.NewRef(1)
|
||||||
|
b.sharedFieldReferences[types.Validators] = stateutil.NewRef(1)
|
||||||
|
b.sharedFieldReferences[types.Balances] = stateutil.NewRef(1)
|
||||||
|
b.sharedFieldReferences[types.RandaoMixes] = stateutil.NewRef(1)
|
||||||
|
b.sharedFieldReferences[types.Slashings] = stateutil.NewRef(1)
|
||||||
|
b.sharedFieldReferences[types.PreviousEpochParticipationBits] = stateutil.NewRef(1)
|
||||||
|
b.sharedFieldReferences[types.CurrentEpochParticipationBits] = stateutil.NewRef(1)
|
||||||
|
b.sharedFieldReferences[types.InactivityScores] = stateutil.NewRef(1)
|
||||||
|
b.sharedFieldReferences[types.LatestExecutionPayloadHeaderDeneb] = stateutil.NewRef(1) // New in Deneb.
|
||||||
|
b.sharedFieldReferences[types.HistoricalSummaries] = stateutil.NewRef(1) // New in Capella.
|
||||||
|
|
||||||
|
state.StateCount.Inc()
|
||||||
|
// Finalizer runs when dst is being destroyed in garbage collection.
|
||||||
|
runtime.SetFinalizer(b, finalizerCleanup)
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Copy returns a deep copy of the beacon state.
|
// Copy returns a deep copy of the beacon state.
|
||||||
func (b *BeaconState) Copy() state.BeaconState {
|
func (b *BeaconState) Copy() state.BeaconState {
|
||||||
b.lock.RLock()
|
b.lock.RLock()
|
||||||
@@ -489,6 +599,8 @@ func (b *BeaconState) Copy() state.BeaconState {
|
|||||||
fieldCount = params.BeaconConfig().BeaconStateBellatrixFieldCount
|
fieldCount = params.BeaconConfig().BeaconStateBellatrixFieldCount
|
||||||
case version.Capella:
|
case version.Capella:
|
||||||
fieldCount = params.BeaconConfig().BeaconStateCapellaFieldCount
|
fieldCount = params.BeaconConfig().BeaconStateCapellaFieldCount
|
||||||
|
case version.Deneb:
|
||||||
|
fieldCount = params.BeaconConfig().BeaconStateCapellaFieldCount
|
||||||
}
|
}
|
||||||
|
|
||||||
dst := &BeaconState{
|
dst := &BeaconState{
|
||||||
@@ -532,6 +644,7 @@ func (b *BeaconState) Copy() state.BeaconState {
|
|||||||
nextSyncCommittee: b.nextSyncCommitteeVal(),
|
nextSyncCommittee: b.nextSyncCommitteeVal(),
|
||||||
latestExecutionPayloadHeader: b.latestExecutionPayloadHeaderVal(),
|
latestExecutionPayloadHeader: b.latestExecutionPayloadHeaderVal(),
|
||||||
latestExecutionPayloadHeaderCapella: b.latestExecutionPayloadHeaderCapellaVal(),
|
latestExecutionPayloadHeaderCapella: b.latestExecutionPayloadHeaderCapellaVal(),
|
||||||
|
latestExecutionPayloadHeaderDeneb: b.latestExecutionPayloadHeaderDenebVal(),
|
||||||
|
|
||||||
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
||||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||||
@@ -551,6 +664,8 @@ func (b *BeaconState) Copy() state.BeaconState {
|
|||||||
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, bellatrixSharedFieldRefCount)
|
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, bellatrixSharedFieldRefCount)
|
||||||
case version.Capella:
|
case version.Capella:
|
||||||
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, capellaSharedFieldRefCount)
|
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, capellaSharedFieldRefCount)
|
||||||
|
case version.Deneb:
|
||||||
|
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, denebSharedFieldRefCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
for field, ref := range b.sharedFieldReferences {
|
for field, ref := range b.sharedFieldReferences {
|
||||||
@@ -640,6 +755,8 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
|
|||||||
b.dirtyFields = make(map[types.FieldIndex]bool, params.BeaconConfig().BeaconStateBellatrixFieldCount)
|
b.dirtyFields = make(map[types.FieldIndex]bool, params.BeaconConfig().BeaconStateBellatrixFieldCount)
|
||||||
case version.Capella:
|
case version.Capella:
|
||||||
b.dirtyFields = make(map[types.FieldIndex]bool, params.BeaconConfig().BeaconStateCapellaFieldCount)
|
b.dirtyFields = make(map[types.FieldIndex]bool, params.BeaconConfig().BeaconStateCapellaFieldCount)
|
||||||
|
case version.Deneb:
|
||||||
|
b.dirtyFields = make(map[types.FieldIndex]bool, params.BeaconConfig().BeaconStateCapellaFieldCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -830,6 +947,8 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
|
|||||||
return b.latestExecutionPayloadHeader.HashTreeRoot()
|
return b.latestExecutionPayloadHeader.HashTreeRoot()
|
||||||
case types.LatestExecutionPayloadHeaderCapella:
|
case types.LatestExecutionPayloadHeaderCapella:
|
||||||
return b.latestExecutionPayloadHeaderCapella.HashTreeRoot()
|
return b.latestExecutionPayloadHeaderCapella.HashTreeRoot()
|
||||||
|
case types.LatestExecutionPayloadHeaderDeneb:
|
||||||
|
return b.latestExecutionPayloadHeaderDeneb.HashTreeRoot()
|
||||||
case types.NextWithdrawalIndex:
|
case types.NextWithdrawalIndex:
|
||||||
return ssz.Uint64Root(b.nextWithdrawalIndex), nil
|
return ssz.Uint64Root(b.nextWithdrawalIndex), nil
|
||||||
case types.NextWithdrawalValidatorIndex:
|
case types.NextWithdrawalValidatorIndex:
|
||||||
|
|||||||
@@ -146,7 +146,7 @@ func (f FieldIndex) RealPosition() int {
|
|||||||
return 22
|
return 22
|
||||||
case NextSyncCommittee:
|
case NextSyncCommittee:
|
||||||
return 23
|
return 23
|
||||||
case LatestExecutionPayloadHeader, LatestExecutionPayloadHeaderCapella:
|
case LatestExecutionPayloadHeader, LatestExecutionPayloadHeaderCapella, LatestExecutionPayloadHeaderDeneb:
|
||||||
return 24
|
return 24
|
||||||
case NextWithdrawalIndex:
|
case NextWithdrawalIndex:
|
||||||
return 25
|
return 25
|
||||||
@@ -205,6 +205,7 @@ const (
|
|||||||
NextSyncCommittee
|
NextSyncCommittee
|
||||||
LatestExecutionPayloadHeader
|
LatestExecutionPayloadHeader
|
||||||
LatestExecutionPayloadHeaderCapella
|
LatestExecutionPayloadHeaderCapella
|
||||||
|
LatestExecutionPayloadHeaderDeneb
|
||||||
NextWithdrawalIndex
|
NextWithdrawalIndex
|
||||||
NextWithdrawalValidatorIndex
|
NextWithdrawalValidatorIndex
|
||||||
HistoricalSummaries
|
HistoricalSummaries
|
||||||
|
|||||||
@@ -205,7 +205,7 @@ func ReplayProcessSlots(ctx context.Context, state state.BeaconState, slot primi
|
|||||||
tracing.AnnotateError(span, err)
|
tracing.AnnotateError(span, err)
|
||||||
return nil, errors.Wrap(err, "could not process epoch with optimizations")
|
return nil, errors.Wrap(err, "could not process epoch with optimizations")
|
||||||
}
|
}
|
||||||
case version.Altair, version.Bellatrix, version.Capella:
|
case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
|
||||||
state, err = altair.ProcessEpoch(ctx, state)
|
state, err = altair.ProcessEpoch(ctx, state)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
tracing.AnnotateError(span, err)
|
tracing.AnnotateError(span, err)
|
||||||
@@ -243,6 +243,14 @@ func ReplayProcessSlots(ctx context.Context, state state.BeaconState, slot primi
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if prysmtime.CanUpgradeToDeneb(state.Slot()) {
|
||||||
|
state, err = capella.UpgradeToDeneb(state)
|
||||||
|
if err != nil {
|
||||||
|
tracing.AnnotateError(span, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return state, nil
|
return state, nil
|
||||||
|
|||||||
@@ -4,6 +4,8 @@ go_library(
|
|||||||
name = "go_default_library",
|
name = "go_default_library",
|
||||||
srcs = [
|
srcs = [
|
||||||
"batch_verifier.go",
|
"batch_verifier.go",
|
||||||
|
"block_and_blob_queue.go",
|
||||||
|
"block_batcher.go",
|
||||||
"broadcast_bls_changes.go",
|
"broadcast_bls_changes.go",
|
||||||
"context.go",
|
"context.go",
|
||||||
"deadlines.go",
|
"deadlines.go",
|
||||||
@@ -21,6 +23,8 @@ go_library(
|
|||||||
"rpc.go",
|
"rpc.go",
|
||||||
"rpc_beacon_blocks_by_range.go",
|
"rpc_beacon_blocks_by_range.go",
|
||||||
"rpc_beacon_blocks_by_root.go",
|
"rpc_beacon_blocks_by_root.go",
|
||||||
|
"rpc_blob_sidecars_by_range.go",
|
||||||
|
"rpc_blob_sidecars_by_root.go",
|
||||||
"rpc_chunked_response.go",
|
"rpc_chunked_response.go",
|
||||||
"rpc_goodbye.go",
|
"rpc_goodbye.go",
|
||||||
"rpc_metadata.go",
|
"rpc_metadata.go",
|
||||||
@@ -32,6 +36,7 @@ go_library(
|
|||||||
"subscriber_beacon_aggregate_proof.go",
|
"subscriber_beacon_aggregate_proof.go",
|
||||||
"subscriber_beacon_attestation.go",
|
"subscriber_beacon_attestation.go",
|
||||||
"subscriber_beacon_blocks.go",
|
"subscriber_beacon_blocks.go",
|
||||||
|
"subscriber_blob_sidecar.go",
|
||||||
"subscriber_bls_to_execution_change.go",
|
"subscriber_bls_to_execution_change.go",
|
||||||
"subscriber_handlers.go",
|
"subscriber_handlers.go",
|
||||||
"subscriber_sync_committee_message.go",
|
"subscriber_sync_committee_message.go",
|
||||||
@@ -42,6 +47,7 @@ go_library(
|
|||||||
"validate_attester_slashing.go",
|
"validate_attester_slashing.go",
|
||||||
"validate_beacon_attestation.go",
|
"validate_beacon_attestation.go",
|
||||||
"validate_beacon_blocks.go",
|
"validate_beacon_blocks.go",
|
||||||
|
"validate_blob.go",
|
||||||
"validate_bls_to_execution_change.go",
|
"validate_bls_to_execution_change.go",
|
||||||
"validate_proposer_slashing.go",
|
"validate_proposer_slashing.go",
|
||||||
"validate_sync_committee_message.go",
|
"validate_sync_committee_message.go",
|
||||||
@@ -69,7 +75,6 @@ go_library(
|
|||||||
"//beacon-chain/core/helpers:go_default_library",
|
"//beacon-chain/core/helpers:go_default_library",
|
||||||
"//beacon-chain/core/signing:go_default_library",
|
"//beacon-chain/core/signing:go_default_library",
|
||||||
"//beacon-chain/core/transition:go_default_library",
|
"//beacon-chain/core/transition:go_default_library",
|
||||||
"//beacon-chain/core/transition/interop:go_default_library",
|
|
||||||
"//beacon-chain/db:go_default_library",
|
"//beacon-chain/db:go_default_library",
|
||||||
"//beacon-chain/db/filters:go_default_library",
|
"//beacon-chain/db/filters:go_default_library",
|
||||||
"//beacon-chain/execution:go_default_library",
|
"//beacon-chain/execution:go_default_library",
|
||||||
@@ -109,6 +114,7 @@ go_library(
|
|||||||
"//time:go_default_library",
|
"//time:go_default_library",
|
||||||
"//time/slots:go_default_library",
|
"//time/slots:go_default_library",
|
||||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||||
|
"@com_github_ethereum_go_ethereum//params:go_default_library",
|
||||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||||
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
|
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
|
||||||
@@ -134,6 +140,9 @@ go_test(
|
|||||||
size = "small",
|
size = "small",
|
||||||
srcs = [
|
srcs = [
|
||||||
"batch_verifier_test.go",
|
"batch_verifier_test.go",
|
||||||
|
"blobs_test.go",
|
||||||
|
"block_and_blob_queue_test.go",
|
||||||
|
"block_batcher_test.go",
|
||||||
"broadcast_bls_changes_test.go",
|
"broadcast_bls_changes_test.go",
|
||||||
"context_test.go",
|
"context_test.go",
|
||||||
"decode_pubsub_test.go",
|
"decode_pubsub_test.go",
|
||||||
@@ -144,8 +153,11 @@ go_test(
|
|||||||
"rate_limiter_test.go",
|
"rate_limiter_test.go",
|
||||||
"rpc_beacon_blocks_by_range_test.go",
|
"rpc_beacon_blocks_by_range_test.go",
|
||||||
"rpc_beacon_blocks_by_root_test.go",
|
"rpc_beacon_blocks_by_root_test.go",
|
||||||
|
"rpc_blob_sidecars_by_range_test.go",
|
||||||
|
"rpc_blob_sidecars_by_root_test.go",
|
||||||
"rpc_chunked_response_test.go",
|
"rpc_chunked_response_test.go",
|
||||||
"rpc_goodbye_test.go",
|
"rpc_goodbye_test.go",
|
||||||
|
"rpc_handler_test.go",
|
||||||
"rpc_metadata_test.go",
|
"rpc_metadata_test.go",
|
||||||
"rpc_ping_test.go",
|
"rpc_ping_test.go",
|
||||||
"rpc_send_request_test.go",
|
"rpc_send_request_test.go",
|
||||||
@@ -158,7 +170,6 @@ go_test(
|
|||||||
"subscription_topic_handler_test.go",
|
"subscription_topic_handler_test.go",
|
||||||
"sync_fuzz_test.go",
|
"sync_fuzz_test.go",
|
||||||
"sync_test.go",
|
"sync_test.go",
|
||||||
"utils_test.go",
|
|
||||||
"validate_aggregate_proof_test.go",
|
"validate_aggregate_proof_test.go",
|
||||||
"validate_attester_slashing_test.go",
|
"validate_attester_slashing_test.go",
|
||||||
"validate_beacon_attestation_test.go",
|
"validate_beacon_attestation_test.go",
|
||||||
|
|||||||
324
beacon-chain/sync/blobs_test.go
Normal file
324
beacon-chain/sync/blobs_test.go
Normal file
@@ -0,0 +1,324 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/binary"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
gethTypes "github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/libp2p/go-libp2p/core/network"
|
||||||
|
"github.com/libp2p/go-libp2p/core/protocol"
|
||||||
|
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||||
|
db "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder"
|
||||||
|
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||||
|
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
|
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
|
leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/network/forks"
|
||||||
|
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||||
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||||
|
)
|
||||||
|
|
||||||
|
type blobsTestCase struct {
|
||||||
|
name string
|
||||||
|
nblocks int // how many blocks to loop through in setting up test fixtures & requests
|
||||||
|
missing map[int]map[int]bool // skip this blob index, so that we can test different custody scenarios
|
||||||
|
expired map[int]bool // mark block expired to test scenarios where requests are outside retention window
|
||||||
|
chain *mock.ChainService // allow tests to control retention window via current slot and finalized checkpoint
|
||||||
|
total *int // allow a test to specify the total number of responses received
|
||||||
|
err error
|
||||||
|
serverHandle testHandler
|
||||||
|
defineExpected expectedDefiner
|
||||||
|
requestFromSidecars requestFromSidecars
|
||||||
|
topic protocol.ID
|
||||||
|
oldestSlot oldestSlotCallback
|
||||||
|
}
|
||||||
|
|
||||||
|
type testHandler func(s *Service) rpcHandler
|
||||||
|
type expectedDefiner func(t *testing.T, scs []*ethpb.BlobSidecar, req interface{}) []*expectedBlobChunk
|
||||||
|
type requestFromSidecars func([]*ethpb.BlobSidecar) interface{}
|
||||||
|
type oldestSlotCallback func(t *testing.T) types.Slot
|
||||||
|
|
||||||
|
func generateTestBlockWithSidecars(t *testing.T, parent [32]byte, slot types.Slot, nblobs int) (*ethpb.SignedBeaconBlockDeneb, []*ethpb.BlobSidecar) {
|
||||||
|
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
|
||||||
|
stateRoot := bytesutil.PadTo([]byte("stateRoot"), fieldparams.RootLength)
|
||||||
|
receiptsRoot := bytesutil.PadTo([]byte("receiptsRoot"), fieldparams.RootLength)
|
||||||
|
logsBloom := bytesutil.PadTo([]byte("logs"), fieldparams.LogsBloomLength)
|
||||||
|
parentHash := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
|
||||||
|
tx := gethTypes.NewTransaction(
|
||||||
|
0,
|
||||||
|
common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"),
|
||||||
|
big.NewInt(0), 0, big.NewInt(0),
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
txs := []*gethTypes.Transaction{tx}
|
||||||
|
encodedBinaryTxs := make([][]byte, 1)
|
||||||
|
var err error
|
||||||
|
encodedBinaryTxs[0], err = txs[0].MarshalBinary()
|
||||||
|
require.NoError(t, err)
|
||||||
|
blockHash := bytesutil.ToBytes32([]byte("foo"))
|
||||||
|
payload := &enginev1.ExecutionPayloadDeneb{
|
||||||
|
ParentHash: parentHash,
|
||||||
|
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||||
|
StateRoot: stateRoot,
|
||||||
|
ReceiptsRoot: receiptsRoot,
|
||||||
|
LogsBloom: logsBloom,
|
||||||
|
PrevRandao: blockHash[:],
|
||||||
|
BlockNumber: 0,
|
||||||
|
GasLimit: 0,
|
||||||
|
GasUsed: 0,
|
||||||
|
Timestamp: 0,
|
||||||
|
ExtraData: make([]byte, 0),
|
||||||
|
BaseFeePerGas: bytesutil.PadTo([]byte("baseFeePerGas"), fieldparams.RootLength),
|
||||||
|
ExcessDataGas: bytesutil.PadTo([]byte("excessDataGas"), fieldparams.RootLength),
|
||||||
|
BlockHash: blockHash[:],
|
||||||
|
Transactions: encodedBinaryTxs,
|
||||||
|
}
|
||||||
|
block := util.NewBeaconBlockDeneb()
|
||||||
|
block.Block.Body.ExecutionPayload = payload
|
||||||
|
block.Block.Slot = slot
|
||||||
|
block.Block.ParentRoot = parent[:]
|
||||||
|
commitments := make([][48]byte, nblobs)
|
||||||
|
block.Block.Body.BlobKzgCommitments = make([][]byte, nblobs)
|
||||||
|
for i := range commitments {
|
||||||
|
binary.LittleEndian.PutUint64(commitments[i][:], uint64(i))
|
||||||
|
block.Block.Body.BlobKzgCommitments[i] = commitments[i][:]
|
||||||
|
}
|
||||||
|
|
||||||
|
root, err := block.Block.HashTreeRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
sidecars := make([]*ethpb.BlobSidecar, len(commitments))
|
||||||
|
for i, c := range block.Block.Body.BlobKzgCommitments {
|
||||||
|
sidecars[i] = generateTestSidecar(root, block, i, c)
|
||||||
|
}
|
||||||
|
return block, sidecars
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateTestSidecar(root [32]byte, block *ethpb.SignedBeaconBlockDeneb, index int, commitment []byte) *ethpb.BlobSidecar {
|
||||||
|
blob := &enginev1.Blob{
|
||||||
|
Data: make([]byte, fieldparams.BlobSize),
|
||||||
|
}
|
||||||
|
binary.LittleEndian.PutUint64(blob.Data, uint64(index))
|
||||||
|
sc := ðpb.BlobSidecar{
|
||||||
|
BlockRoot: root[:],
|
||||||
|
Index: uint64(index),
|
||||||
|
Slot: block.Block.Slot,
|
||||||
|
BlockParentRoot: block.Block.ParentRoot,
|
||||||
|
ProposerIndex: block.Block.ProposerIndex,
|
||||||
|
Blob: blob,
|
||||||
|
KzgCommitment: commitment,
|
||||||
|
KzgProof: commitment,
|
||||||
|
}
|
||||||
|
return sc
|
||||||
|
}
|
||||||
|
|
||||||
|
type expectedBlobChunk struct {
|
||||||
|
code uint8
|
||||||
|
sidecar *ethpb.BlobSidecar
|
||||||
|
message string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *expectedBlobChunk) requireExpected(t *testing.T, s *Service, stream network.Stream) {
|
||||||
|
d := s.cfg.p2p.Encoding().DecodeWithMaxLength
|
||||||
|
|
||||||
|
code, _, err := ReadStatusCode(stream, &encoder.SszNetworkEncoder{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, r.code, code, "unexpected response code")
|
||||||
|
//require.Equal(t, r.message, msg, "unexpected error message")
|
||||||
|
if code != responseCodeSuccess {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := readContextFromStream(stream, s.cfg.chain)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
valRoot := s.cfg.chain.GenesisValidatorsRoot()
|
||||||
|
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(r.sidecar.GetSlot()), valRoot[:])
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, ctxBytes, bytesutil.ToBytes4(c))
|
||||||
|
|
||||||
|
sc := ðpb.BlobSidecar{}
|
||||||
|
require.NoError(t, d(stream, sc))
|
||||||
|
require.Equal(t, bytesutil.ToBytes32(sc.BlockRoot), bytesutil.ToBytes32(r.sidecar.BlockRoot))
|
||||||
|
require.Equal(t, sc.Index, r.sidecar.Index)
|
||||||
|
}
|
||||||
|
|
||||||
|
// setup activates a test config with repositioned fork epochs, populates an
// in-memory DB with c.nblocks chained blocks (each carrying the maximum number
// of blob sidecars), and returns the sync Service, all generated sidecars, and
// a cleanup func that restores the previous config. Callers must invoke the
// returned cleanup.
func (c *blobsTestCase) setup(t *testing.T) (*Service, []*ethpb.BlobSidecar, func()) {
	cfg := params.BeaconConfig()
	repositionFutureEpochs(cfg)
	undo, err := params.SetActiveWithUndo(cfg)
	require.NoError(t, err)
	cleanup := func() {
		require.NoError(t, undo())
	}
	maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlock)
	if c.chain == nil {
		c.chain = defaultMockChain(t)
	}
	d := db.SetupDB(t)

	sidecars := make([]*ethpb.BlobSidecar, 0)
	oldest := c.oldestSlot(t)
	var parentRoot [32]byte
	for i := 0; i < c.nblocks; i++ {
		// check if there is a slot override for this index
		// ie to create a block outside the minimum_request_epoch
		var bs types.Slot
		if c.expired[i] {
			// the lowest possible bound of the retention period is the deneb epoch, so make sure
			// the slot of an expired block is at least one slot less than the deneb epoch.
			bs = oldest - 1 - types.Slot(i)
		} else {
			bs = oldest + types.Slot(i)
		}
		block, bsc := generateTestBlockWithSidecars(t, parentRoot, bs, maxBlobs)
		root, err := block.Block.HashTreeRoot()
		require.NoError(t, err)
		for _, sc := range bsc {
			sidecars = append(sidecars, sc)
		}
		util.SaveBlock(t, context.Background(), d, block)
		// Chain the next block onto this one.
		parentRoot = root
	}

	client := p2ptest.NewTestP2P(t)
	s := &Service{
		cfg:         &config{p2p: client, chain: c.chain, beaconDB: d},
		rateLimiter: newRateLimiter(client),
	}

	// Generous leaky-bucket limits so test requests are never throttled.
	byRootRate := params.BeaconNetworkConfig().MaxRequestBlobsSidecars * params.BeaconConfig().MaxBlobsPerBlock
	byRangeRate := params.BeaconNetworkConfig().MaxRequestBlobsSidecars * params.BeaconConfig().MaxBlobsPerBlock
	s.setRateCollector(p2p.RPCBlobSidecarsByRootTopicV1, leakybucket.NewCollector(0.000001, int64(byRootRate), time.Second, false))
	s.setRateCollector(p2p.RPCBlobSidecarsByRangeTopicV1, leakybucket.NewCollector(0.000001, int64(byRangeRate), time.Second, false))

	return s, sidecars, cleanup
}
|
||||||
|
|
||||||
|
// run executes one table-driven case end to end: set up the service and
// sidecars, build the request, persist the expected sidecars, then drive the
// server handler and verify every expected response chunk on the stream.
func (c *blobsTestCase) run(t *testing.T) {
	s, sidecars, cleanup := c.setup(t)
	defer cleanup()
	req := c.requestFromSidecars(sidecars)
	expect := c.defineExpected(t, sidecars, req)
	// Group expected sidecars by slot — SaveBlobSidecar persists per-slot batches.
	m := map[types.Slot][]*ethpb.BlobSidecar{}
	for _, sc := range expect {
		m[sc.sidecar.Slot] = append(m[sc.sidecar.Slot], sc.sidecar)
	}
	for _, blobSidecars := range m {
		require.NoError(t, s.cfg.beaconDB.SaveBlobSidecar(context.Background(), blobSidecars))
	}
	if c.total != nil {
		require.Equal(t, *c.total, len(expect))
	}
	// Client-side handler: assert each expected chunk in order.
	nh := func(stream network.Stream) {
		for _, ex := range expect {
			ex.requireExpected(t, s, stream)
		}
	}
	rht := &rpcHandlerTest{
		t:       t,
		topic:   c.topic,
		timeout: time.Second * 10,
		err:     c.err,
		s:       s,
	}
	rht.testHandler(nh, c.serverHandle(s), req)
}
|
||||||
|
|
||||||
|
// we use max uints for future forks, but this causes overflows when computing slots
|
||||||
|
// so it is helpful in tests to temporarily reposition the epochs to give room for some math.
|
||||||
|
func repositionFutureEpochs(cfg *params.BeaconChainConfig) {
|
||||||
|
if cfg.CapellaForkEpoch == math.MaxUint64 {
|
||||||
|
cfg.CapellaForkEpoch = cfg.BellatrixForkEpoch + 100
|
||||||
|
}
|
||||||
|
if cfg.DenebForkEpoch == math.MaxUint64 {
|
||||||
|
cfg.DenebForkEpoch = cfg.CapellaForkEpoch + 100
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultMockChain(t *testing.T) *mock.ChainService {
|
||||||
|
df, err := forks.Fork(params.BeaconConfig().DenebForkEpoch)
|
||||||
|
require.NoError(t, err)
|
||||||
|
ce := params.BeaconConfig().DenebForkEpoch + params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest + 1000
|
||||||
|
fe := ce - 2
|
||||||
|
cs, err := slots.EpochStart(ce)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
return &mock.ChainService{
|
||||||
|
ValidatorsRoot: [32]byte{},
|
||||||
|
Slot: &cs,
|
||||||
|
FinalizedCheckPoint: ðpb.Checkpoint{Epoch: fe},
|
||||||
|
Fork: df}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTestcaseSetup_BlocksAndBlobs(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
c := &blobsTestCase{nblocks: 10}
|
||||||
|
c.oldestSlot = c.defaultOldestSlotByRoot
|
||||||
|
s, sidecars, cleanup := c.setup(t)
|
||||||
|
req := blobRootRequestFromSidecars(sidecars)
|
||||||
|
expect := c.filterExpectedByRoot(t, sidecars, req)
|
||||||
|
defer cleanup()
|
||||||
|
require.Equal(t, 40, len(sidecars))
|
||||||
|
require.Equal(t, 40, len(expect))
|
||||||
|
for _, sc := range sidecars {
|
||||||
|
blk, err := s.cfg.beaconDB.Block(ctx, bytesutil.ToBytes32(sc.BlockRoot))
|
||||||
|
require.NoError(t, err)
|
||||||
|
var found *int
|
||||||
|
comms, err := blk.Block().Body().BlobKzgCommitments()
|
||||||
|
require.NoError(t, err)
|
||||||
|
for i, cm := range comms {
|
||||||
|
if bytesutil.ToBytes48(sc.KzgCommitment) == bytesutil.ToBytes48(cm) {
|
||||||
|
found = &i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
require.Equal(t, true, found != nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRoundTripDenebSave verifies that a deneb block generated with sidecars
// round-trips through the database: the KZG commitments read back from the
// stored block match the generated sidecars one-to-one.
func TestRoundTripDenebSave(t *testing.T) {
	ctx := context.Background()
	cfg := params.BeaconConfig()
	repositionFutureEpochs(cfg)
	undo, err := params.SetActiveWithUndo(cfg)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, undo())
	}()
	parentRoot := [32]byte{}
	c := blobsTestCase{nblocks: 10}
	c.chain = defaultMockChain(t)
	// Generate the block at the oldest slot inside the blob retention window.
	oldest, err := slots.EpochStart(blobMinReqEpoch(c.chain.FinalizedCheckPoint.Epoch, slots.ToEpoch(c.chain.CurrentSlot())))
	require.NoError(t, err)
	maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlock)
	block, bsc := generateTestBlockWithSidecars(t, parentRoot, oldest, maxBlobs)
	// Pre-save sanity: commitments and sidecars line up.
	require.Equal(t, len(block.Block.Body.BlobKzgCommitments), len(bsc))
	require.Equal(t, maxBlobs, len(bsc))
	for i := range bsc {
		require.DeepEqual(t, block.Block.Body.BlobKzgCommitments[i], bsc[i].KzgCommitment)
	}
	d := db.SetupDB(t)
	util.SaveBlock(t, ctx, d, block)
	root, err := block.Block.HashTreeRoot()
	require.NoError(t, err)
	// Read the block back and re-check the commitments after the round trip.
	dbBlock, err := d.Block(ctx, root)
	require.NoError(t, err)
	comms, err := dbBlock.Block().Body().BlobKzgCommitments()
	require.NoError(t, err)
	require.Equal(t, maxBlobs, len(comms))
	for i := range bsc {
		require.DeepEqual(t, comms[i], bsc[i].KzgCommitment)
	}
}
|
||||||
136
beacon-chain/sync/block_and_blob_queue.go
Normal file
136
beacon-chain/sync/block_and_blob_queue.go
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
|
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||||
|
)
|
||||||
|
|
||||||
|
// blockAndBlobs pairs a beacon block with the blob sidecars received for it.
// Either side may be populated first; blk is nil until the block arrives.
type blockAndBlobs struct {
	blk   interfaces.ReadOnlySignedBeaconBlock
	blobs []*eth.BlobSidecar
}

// blockAndBlocksQueue is a mutex-guarded pending store keyed by block root,
// used to pair blocks with their blob sidecars before import.
type blockAndBlocksQueue struct {
	lock  sync.RWMutex
	queue map[[32]byte]blockAndBlobs
}
|
||||||
|
|
||||||
|
func newBlockAndBlobs() *blockAndBlocksQueue {
|
||||||
|
return &blockAndBlocksQueue{
|
||||||
|
queue: make(map[[32]byte]blockAndBlobs),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *blockAndBlocksQueue) addBlock(b interfaces.ReadOnlySignedBeaconBlock) error {
|
||||||
|
q.lock.Lock()
|
||||||
|
defer q.lock.Unlock()
|
||||||
|
|
||||||
|
if b.Version() < version.Deneb {
|
||||||
|
return errors.New("block version is not supported")
|
||||||
|
}
|
||||||
|
|
||||||
|
r, err := b.Block().HashTreeRoot()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
bnb, ok := q.queue[r]
|
||||||
|
if !ok {
|
||||||
|
q.queue[r] = blockAndBlobs{
|
||||||
|
blk: b,
|
||||||
|
blobs: make([]*eth.BlobSidecar, 0, 4),
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
bnb.blk = b
|
||||||
|
q.queue[r] = bnb
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *blockAndBlocksQueue) addBlob(b *eth.BlobSidecar) error {
|
||||||
|
q.lock.Lock()
|
||||||
|
defer q.lock.Unlock()
|
||||||
|
r := bytesutil.ToBytes32(b.BlockRoot)
|
||||||
|
|
||||||
|
bnb, ok := q.queue[r]
|
||||||
|
if !ok {
|
||||||
|
q.queue[r] = blockAndBlobs{
|
||||||
|
blobs: make([]*eth.BlobSidecar, 0, 4),
|
||||||
|
}
|
||||||
|
bnb = q.queue[r]
|
||||||
|
}
|
||||||
|
bnb.blobs = append(bnb.blobs, b)
|
||||||
|
q.queue[r] = bnb
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *blockAndBlocksQueue) getBlock(r [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||||
|
q.lock.RLock()
|
||||||
|
defer q.lock.RUnlock()
|
||||||
|
|
||||||
|
bnb, ok := q.queue[r]
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("block does not exist")
|
||||||
|
}
|
||||||
|
if bnb.blk == nil {
|
||||||
|
return nil, errors.New("block does not exist")
|
||||||
|
}
|
||||||
|
return bnb.blk, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *blockAndBlocksQueue) getBlob(r [32]byte, i uint64) (*eth.BlobSidecar, error) {
|
||||||
|
q.lock.RLock()
|
||||||
|
defer q.lock.RUnlock()
|
||||||
|
|
||||||
|
if i >= params.MaxBlobsPerBlock {
|
||||||
|
return nil, errors.New("request out of bounds")
|
||||||
|
}
|
||||||
|
|
||||||
|
bnb, ok := q.queue[r]
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("blob does not exist")
|
||||||
|
}
|
||||||
|
for _, blob := range bnb.blobs {
|
||||||
|
if i == blob.Index {
|
||||||
|
return blob, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, errors.New("blob does not exist")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *blockAndBlocksQueue) delete(r [32]byte) {
|
||||||
|
q.lock.Lock()
|
||||||
|
defer q.lock.Unlock()
|
||||||
|
|
||||||
|
delete(q.queue, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *blockAndBlocksQueue) canImport(r [32]byte) (bool, error) {
|
||||||
|
q.lock.RLock()
|
||||||
|
defer q.lock.RUnlock()
|
||||||
|
|
||||||
|
bnb, ok := q.queue[r]
|
||||||
|
if !ok {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if bnb.blk == nil || bnb.blk.IsNil() {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
commitments, err := bnb.blk.Block().Body().BlobKzgCommitments()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: enhance check to ensure that block commitments match blob
|
||||||
|
|
||||||
|
return len(commitments) == len(bnb.blobs), nil
|
||||||
|
}
|
||||||
108
beacon-chain/sync/block_and_blob_queue_test.go
Normal file
108
beacon-chain/sync/block_and_blob_queue_test.go
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
|
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Test_blockAndBlocksQueue exercises the pairing queue across its states:
// empty, block-only, blob-only, fully paired, and partially paired.
func Test_blockAndBlocksQueue(t *testing.T) {
	// Empty queue: lookups fail, nothing is importable.
	t.Run("no block", func(t *testing.T) {
		q := newBlockAndBlobs()
		_, err := q.getBlock([32]byte{})
		require.ErrorContains(t, "block does not exist", err)
		canImport, err := q.canImport([32]byte{})
		require.NoError(t, err)
		require.Equal(t, false, canImport)
	})
	t.Run("no blob", func(t *testing.T) {
		q := newBlockAndBlobs()
		_, err := q.getBlob([32]byte{}, 0)
		require.ErrorContains(t, "blob does not exist", err)
		canImport, err := q.canImport([32]byte{})
		require.NoError(t, err)
		require.Equal(t, false, canImport)
	})
	// Block present but no sidecars: blob lookups still fail; the zero root
	// (not the block's root) remains unimportable.
	t.Run("has block, no blob", func(t *testing.T) {
		q := newBlockAndBlobs()
		sb, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockDeneb(&eth.SignedBeaconBlockDeneb{}))
		require.NoError(t, err)
		require.NoError(t, q.addBlock(sb))
		_, err = q.getBlob([32]byte{}, 0)
		require.ErrorContains(t, "blob does not exist", err)
		canImport, err := q.canImport([32]byte{})
		require.NoError(t, err)
		require.Equal(t, false, canImport)
	})
	// Sidecar present without a block: block lookup fails, and getBlob only
	// finds the exact (root, index) pair that was added.
	t.Run("has one blob, no block", func(t *testing.T) {
		q := newBlockAndBlobs()
		require.NoError(t, q.addBlob(&eth.BlobSidecar{Index: 1}))
		_, err := q.getBlock([32]byte{})
		require.ErrorContains(t, "block does not exist", err)
		_, err = q.getBlob([32]byte{}, 0)
		require.ErrorContains(t, "blob does not exist", err)
		_, err = q.getBlob([32]byte{}, 2)
		require.ErrorContains(t, "blob does not exist", err)
		_, err = q.getBlob([32]byte{}, 3)
		require.ErrorContains(t, "blob does not exist", err)
		canImport, err := q.canImport([32]byte{})
		require.NoError(t, err)
		require.Equal(t, false, canImport)
	})
	// Block with three commitments plus three matching sidecars: all lookups
	// succeed and the root becomes importable.
	t.Run("has everything", func(t *testing.T) {
		q := newBlockAndBlobs()
		sb, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockDeneb(&eth.SignedBeaconBlockDeneb{Block: &eth.BeaconBlockDeneb{}}))
		require.NoError(t, err)
		b := bytesutil.PadTo([]byte("kzg"), 48)
		require.NoError(t, sb.SetBlobKzgCommitments([][]byte{b, b, b}))
		r, err := sb.Block().HashTreeRoot()
		require.NoError(t, err)
		require.NoError(t, q.addBlock(sb))
		require.NoError(t, q.addBlob(&eth.BlobSidecar{BlockRoot: r[:], Index: 0}))
		require.NoError(t, q.addBlob(&eth.BlobSidecar{BlockRoot: r[:], Index: 1}))
		require.NoError(t, q.addBlob(&eth.BlobSidecar{BlockRoot: r[:], Index: 2}))
		blk, err := q.getBlock(r)
		require.NoError(t, err)
		require.DeepEqual(t, sb, blk)
		blob, err := q.getBlob(r, 0)
		require.NoError(t, err)
		require.Equal(t, uint64(0), blob.Index)
		blob, err = q.getBlob(r, 1)
		require.NoError(t, err)
		require.Equal(t, uint64(1), blob.Index)
		blob, err = q.getBlob(r, 2)
		require.NoError(t, err)
		require.Equal(t, uint64(2), blob.Index)
		canImport, err := q.canImport(r)
		require.NoError(t, err)
		require.Equal(t, true, canImport)
	})
	// Three commitments but only the index-2 sidecar: lower indices are
	// missing, so the root stays unimportable.
	t.Run("only has higher index", func(t *testing.T) {
		q := newBlockAndBlobs()
		sb, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockDeneb(&eth.SignedBeaconBlockDeneb{Block: &eth.BeaconBlockDeneb{}}))
		require.NoError(t, err)
		b := bytesutil.PadTo([]byte("kzg"), 48)
		require.NoError(t, sb.SetBlobKzgCommitments([][]byte{b, b, b}))
		r, err := sb.Block().HashTreeRoot()
		require.NoError(t, err)
		require.NoError(t, q.addBlock(sb))
		require.NoError(t, q.addBlob(&eth.BlobSidecar{BlockRoot: r[:], Index: 2}))
		blk, err := q.getBlock(r)
		require.NoError(t, err)
		require.DeepEqual(t, sb, blk)
		_, err = q.getBlob(r, 0)
		require.ErrorContains(t, "blob does not exist", err)
		_, err = q.getBlob(r, 1)
		require.ErrorContains(t, "blob does not exist", err)
		blob, err := q.getBlob(r, 2)
		require.NoError(t, err)
		require.Equal(t, uint64(2), blob.Index)
		canImport, err := q.canImport(r)
		require.NoError(t, err)
		require.Equal(t, false, canImport)
	})
}
|
||||||
184
beacon-chain/sync/block_batcher.go
Normal file
184
beacon-chain/sync/block_batcher.go
Normal file
@@ -0,0 +1,184 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sort"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
|
)
|
||||||
|
|
||||||
|
// canonicalChecker reports whether the block with the given root is canonical.
type canonicalChecker func(context.Context, [32]byte) (bool, error)

// filters all the provided blocks to ensure they are canonical
// and are strictly linear.
//
// filterCanonical returns (sequential, non-sequential, error). prevRoot is
// read AND written: it carries the last accepted root across batches, and is
// updated in place as blocks are accepted. Non-canonical blocks are dropped
// silently; once a canonical block fails to chain onto prevRoot, that block
// and everything after it lands in the non-sequential list.
func filterCanonical(ctx context.Context, blks []blocks.ROBlock, prevRoot *[32]byte, canonical canonicalChecker) ([]blocks.ROBlock, []blocks.ROBlock, error) {
	seq := make([]blocks.ROBlock, 0, len(blks))
	nseq := make([]blocks.ROBlock, 0)
	for i, b := range blks {
		cb, err := canonical(ctx, b.Root())
		if err != nil {
			return nil, nil, err
		}
		if !cb {
			continue
		}
		// filterCanonical is called in batches, so prevRoot can be the last root from the previous batch.
		// prevRoot will be the zero value until we find the first canonical block in a given request.
		first := *prevRoot == [32]byte{}
		// We assume blocks are processed in order, so the previous canonical root should be the parent of the next.
		// If the current block isn't descended from the last, something is wrong. Append everything remaining
		// to the list of non-sequential blocks and stop building the canonical list.
		if !first && *prevRoot != b.Block().ParentRoot() {
			nseq = append(nseq, blks[i:]...)
			break
		}
		seq = append(seq, blks[i])
		// Set the previous root as the
		// newly added block's root
		currRoot := b.Root()
		*prevRoot = currRoot
	}
	return seq, nseq, nil
}
|
||||||
|
|
||||||
|
// returns a copy of the []ROBlock list in sorted order with duplicates removed
|
||||||
|
func sortedUniqueBlocks(blks []blocks.ROBlock) []blocks.ROBlock {
|
||||||
|
// Remove duplicate blocks received
|
||||||
|
sort.Sort(blocks.ROBlockSlice(blks))
|
||||||
|
u := 0
|
||||||
|
for i := 1; i < len(blks); i++ {
|
||||||
|
if blks[i].Root() != blks[u].Root() {
|
||||||
|
u += 1
|
||||||
|
if u != i {
|
||||||
|
blks[u] = blks[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return blks[:u+1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// blockBatch is one window of a by-range request: the slot bounds plus the
// blocks retrieved for it, split into a canonical sequential run (seq) and
// any trailing non-sequential remainder (nonseq).
type blockBatch struct {
	start  primitives.Slot // first slot of the window (inclusive)
	end    primitives.Slot // last slot of the window (inclusive)
	seq    []blocks.ROBlock
	nonseq []blocks.ROBlock
	err    error // error encountered while producing this batch, if any
}
|
||||||
|
|
||||||
|
// RateLimitCost returns the rate-limiter cost for this batch, based on the
// slot span. NOTE(review): end-start covers an inclusive range of end-start+1
// slots; confirm the off-by-one is intentional relative to limiter accounting.
func (bb blockBatch) RateLimitCost() int {
	return int(bb.end - bb.start)
}

// Sequence returns the canonical, strictly-linear run of blocks in the batch.
func (bb blockBatch) Sequence() []blocks.ROBlock {
	return bb.seq
}

// SequenceBroken reports whether any blocks fell outside the linear sequence.
func (bb blockBatch) SequenceBroken() bool {
	return len(bb.nonseq) > 0
}

// Err returns the error recorded while producing this batch, if any.
func (bb blockBatch) Err() error {
	return bb.err
}
|
||||||
|
|
||||||
|
// blockRangeBatcher iterates a blocks-by-range request in fixed-size windows,
// applying rate limiting and optional pacing between batches.
type blockRangeBatcher struct {
	start       primitives.Slot // first requested slot
	end         primitives.Slot // last requested slot (inclusive)
	size        uint64          // slots per batch
	db          db.NoHeadAccessDatabase
	limiter     *limiter
	isCanonical canonicalChecker
	// ticker, when non-nil, paces batches after the first one.
	ticker *time.Ticker

	// lastSeq is the root of the last sequential block emitted; threaded
	// through filterCanonical across batches.
	lastSeq [32]byte
	// current is the most recently produced batch; nil before the first.
	current *blockBatch
}
|
||||||
|
|
||||||
|
func (bb *blockRangeBatcher) genesisBlock(ctx context.Context) (blocks.ROBlock, error) {
|
||||||
|
b, err := bb.db.GenesisBlock(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return blocks.ROBlock{}, err
|
||||||
|
}
|
||||||
|
htr, err := b.Block().HashTreeRoot()
|
||||||
|
if err != nil {
|
||||||
|
return blocks.ROBlock{}, err
|
||||||
|
}
|
||||||
|
return blocks.NewROBlock(b, htr), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBlockBatch(start, reqEnd primitives.Slot, size uint64) (blockBatch, bool) {
|
||||||
|
if start > reqEnd {
|
||||||
|
return blockBatch{}, false
|
||||||
|
}
|
||||||
|
nb := blockBatch{start: start, end: start.Add(size - 1)}
|
||||||
|
if nb.end > reqEnd {
|
||||||
|
nb.end = reqEnd
|
||||||
|
}
|
||||||
|
return nb, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bat blockBatch) Next(reqEnd primitives.Slot, size uint64) (blockBatch, bool) {
|
||||||
|
if bat.SequenceBroken() {
|
||||||
|
return blockBatch{}, false
|
||||||
|
}
|
||||||
|
return newBlockBatch(bat.end.Add(1), reqEnd, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next produces the next batch of blocks for the stream: it advances the
// window, enforces the rate limit, paces via the ticker (after the first
// batch), loads blocks from the DB (prepending genesis for slot-0 requests),
// deduplicates/sorts them, and filters them to the canonical linear sequence.
// Returns false when iteration is finished or a terminal error occurred (the
// returned batch's err field carries it).
func (bb *blockRangeBatcher) Next(ctx context.Context, stream libp2pcore.Stream) (blockBatch, bool) {
	var nb blockBatch
	var ok bool
	if bb.current != nil {
		current := *bb.current
		nb, ok = current.Next(bb.end, bb.size)
	} else {
		nb, ok = newBlockBatch(bb.start, bb.end, bb.size)
	}
	if !ok {
		return blockBatch{}, false
	}
	if err := bb.limiter.validateRequest(stream, bb.size); err != nil {
		return blockBatch{err: errors.Wrap(err, "throttled by rate limiter")}, false
	}

	// block if there is work to do, unless this is the first batch
	if bb.ticker != nil && bb.current != nil {
		<-bb.ticker.C
	}
	filter := filters.NewFilter().SetStartSlot(nb.start).SetEndSlot(nb.end)
	blks, roots, err := bb.db.Blocks(ctx, filter)
	if err != nil {
		return blockBatch{err: errors.Wrap(err, "Could not retrieve blocks")}, false
	}

	// make slice with extra +1 capacity in case we want to grow it to also hold the genesis block
	rob := make([]blocks.ROBlock, len(blks), len(blks)+1)
	goff := 0 // offset for genesis value
	if nb.start == 0 {
		gb, err := bb.genesisBlock(ctx)
		if err != nil {
			return blockBatch{err: errors.Wrap(err, "could not retrieve genesis block")}, false
		}
		rob = append(rob, blocks.ROBlock{}) // grow the slice to its capacity to hold the genesis block
		rob[0] = gb
		goff = 1
	}
	for i := 0; i < len(blks); i++ {
		rob[goff+i] = blocks.NewROBlock(blks[i], roots[i])
	}
	// Filter and sort our retrieved blocks, so that
	// we only return valid sets of blocks.
	rob = sortedUniqueBlocks(rob)

	// lastSeq threads the sequential chain across successive batches.
	nb.seq, nb.nonseq, nb.err = filterCanonical(ctx, rob, &bb.lastSeq, bb.isCanonical)

	// Decrease allowed blocks capacity by the number of streamed blocks.
	bb.limiter.add(stream, int64(1+nb.end.SubSlot(nb.start)))
	bb.current = &nb
	return *bb.current, true
}
|
||||||
@@ -5,7 +5,6 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
|
||||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
@@ -15,33 +14,25 @@ import (
|
|||||||
func TestSortedObj_SortBlocksRoots(t *testing.T) {
|
func TestSortedObj_SortBlocksRoots(t *testing.T) {
|
||||||
source := rand.NewSource(33)
|
source := rand.NewSource(33)
|
||||||
randGen := rand.New(source)
|
randGen := rand.New(source)
|
||||||
var blks []interfaces.ReadOnlySignedBeaconBlock
|
|
||||||
var roots [][32]byte
|
|
||||||
randFunc := func() int64 {
|
randFunc := func() int64 {
|
||||||
return randGen.Int63n(50)
|
return randGen.Int63n(50)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var blks []blocks.ROBlock
|
||||||
for i := 0; i < 10; i++ {
|
for i := 0; i < 10; i++ {
|
||||||
slot := primitives.Slot(randFunc())
|
slot := primitives.Slot(randFunc())
|
||||||
newBlk, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: slot, Body: ðpb.BeaconBlockBody{}}})
|
newBlk, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: slot, Body: ðpb.BeaconBlockBody{}}})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
blks = append(blks, newBlk)
|
|
||||||
root := bytesutil.ToBytes32(bytesutil.Bytes32(uint64(slot)))
|
root := bytesutil.ToBytes32(bytesutil.Bytes32(uint64(slot)))
|
||||||
roots = append(roots, root)
|
blks = append(blks, blocks.NewROBlock(newBlk, root))
|
||||||
}
|
}
|
||||||
|
|
||||||
r := &Service{}
|
newBlks := sortedUniqueBlocks(blks)
|
||||||
|
|
||||||
newBlks, newRoots := r.sortBlocksAndRoots(blks, roots)
|
|
||||||
|
|
||||||
previousSlot := primitives.Slot(0)
|
previousSlot := primitives.Slot(0)
|
||||||
for i, b := range newBlks {
|
for _, b := range newBlks {
|
||||||
if b.Block().Slot() < previousSlot {
|
if b.Block().Slot() < previousSlot {
|
||||||
t.Errorf("Block list is not sorted as %d is smaller than previousSlot %d", b.Block().Slot(), previousSlot)
|
t.Errorf("Block list is not sorted as %d is smaller than previousSlot %d", b.Block().Slot(), previousSlot)
|
||||||
}
|
}
|
||||||
if bytesutil.FromBytes8(newRoots[i][:]) != uint64(b.Block().Slot()) {
|
|
||||||
t.Errorf("root doesn't match stored slot in block: wanted %d but got %d", b.Block().Slot(), bytesutil.FromBytes8(newRoots[i][:]))
|
|
||||||
}
|
|
||||||
previousSlot = b.Block().Slot()
|
previousSlot = b.Block().Slot()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -49,8 +40,7 @@ func TestSortedObj_SortBlocksRoots(t *testing.T) {
|
|||||||
func TestSortedObj_NoDuplicates(t *testing.T) {
|
func TestSortedObj_NoDuplicates(t *testing.T) {
|
||||||
source := rand.NewSource(33)
|
source := rand.NewSource(33)
|
||||||
randGen := rand.New(source)
|
randGen := rand.New(source)
|
||||||
var blks []interfaces.ReadOnlySignedBeaconBlock
|
var blks []blocks.ROBlock
|
||||||
var roots [][32]byte
|
|
||||||
randFunc := func() int64 {
|
randFunc := func() int64 {
|
||||||
return randGen.Int63n(50)
|
return randGen.Int63n(50)
|
||||||
}
|
}
|
||||||
@@ -63,23 +53,16 @@ func TestSortedObj_NoDuplicates(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
wsbCopy, err := wsb.Copy()
|
wsbCopy, err := wsb.Copy()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
blks = append(blks, wsb, wsbCopy)
|
|
||||||
|
|
||||||
// append twice
|
|
||||||
root := bytesutil.ToBytes32(bytesutil.Bytes32(uint64(slot)))
|
root := bytesutil.ToBytes32(bytesutil.Bytes32(uint64(slot)))
|
||||||
roots = append(roots, root, root)
|
blks = append(blks, blocks.NewROBlock(wsb, root), blocks.NewROBlock(wsbCopy, root))
|
||||||
}
|
}
|
||||||
|
|
||||||
r := &Service{}
|
dedup := sortedUniqueBlocks(blks)
|
||||||
|
roots := make(map[[32]byte]int)
|
||||||
newBlks, newRoots, err := r.dedupBlocksAndRoots(blks, roots)
|
for i, b := range dedup {
|
||||||
require.NoError(t, err)
|
if di, dup := roots[b.Root()]; dup {
|
||||||
|
t.Errorf("Duplicated root %#x at index %d and %d", b.Root(), di, i)
|
||||||
rootMap := make(map[[32]byte]bool)
|
|
||||||
for i, b := range newBlks {
|
|
||||||
if rootMap[newRoots[i]] {
|
|
||||||
t.Errorf("Duplicated root exists %#x with block %v", newRoots[i], b)
|
|
||||||
}
|
}
|
||||||
rootMap[newRoots[i]] = true
|
roots[b.Root()] = i
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -52,25 +52,33 @@ func readContextFromStream(stream network.Stream, chain blockchain.ForkFetcher)
|
|||||||
|
|
||||||
// retrieve expected context depending on rpc topic schema version.
|
// retrieve expected context depending on rpc topic schema version.
|
||||||
func rpcContext(stream network.Stream, chain blockchain.ForkFetcher) ([]byte, error) {
|
func rpcContext(stream network.Stream, chain blockchain.ForkFetcher) ([]byte, error) {
|
||||||
_, _, version, err := p2p.TopicDeconstructor(string(stream.Protocol()))
|
_, message, version, err := p2p.TopicDeconstructor(string(stream.Protocol()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var provideCtx bool
|
||||||
switch version {
|
switch version {
|
||||||
case p2p.SchemaVersionV1:
|
case p2p.SchemaVersionV1:
|
||||||
// Return empty context for a v1 method.
|
// and if it doesn't exist then we assume it's a new topic introduced after altair
|
||||||
return []byte{}, nil
|
provideCtx = !p2p.PreAltairV1SchemaMapping[message]
|
||||||
case p2p.SchemaVersionV2:
|
case p2p.SchemaVersionV2:
|
||||||
currFork := chain.CurrentFork()
|
provideCtx = true
|
||||||
genRoot := chain.GenesisValidatorsRoot()
|
|
||||||
digest, err := signing.ComputeForkDigest(currFork.CurrentVersion, genRoot[:])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return digest[:], nil
|
|
||||||
default:
|
default:
|
||||||
return nil, errors.New("invalid version of %s registered for topic: %s")
|
return nil, errors.New("invalid version of %s registered for topic: %s")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !provideCtx {
|
||||||
|
return []byte{}, nil
|
||||||
|
}
|
||||||
|
currFork := chain.CurrentFork()
|
||||||
|
genRoot := chain.GenesisValidatorsRoot()
|
||||||
|
digest, err := signing.ComputeForkDigest(currFork.CurrentVersion, genRoot[:])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return digest[:], nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Minimal interface for a stream with a protocol.
|
// Minimal interface for a stream with a protocol.
|
||||||
|
|||||||
@@ -37,6 +37,8 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
|
|||||||
// differentiate them below.
|
// differentiate them below.
|
||||||
case strings.Contains(topic, p2p.GossipSyncCommitteeMessage) && !strings.Contains(topic, p2p.SyncContributionAndProofSubnetTopicFormat):
|
case strings.Contains(topic, p2p.GossipSyncCommitteeMessage) && !strings.Contains(topic, p2p.SyncContributionAndProofSubnetTopicFormat):
|
||||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SyncCommitteeMessage{})]
|
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SyncCommitteeMessage{})]
|
||||||
|
case strings.Contains(topic, p2p.GossipBlobSidecarMessage):
|
||||||
|
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SignedBlobSidecar{})]
|
||||||
}
|
}
|
||||||
|
|
||||||
base := p2p.GossipTopicMappings(topic, 0)
|
base := p2p.GossipTopicMappings(topic, 0)
|
||||||
|
|||||||
@@ -16,6 +16,7 @@ import (
|
|||||||
var responseCodeSuccess = byte(0x00)
|
var responseCodeSuccess = byte(0x00)
|
||||||
var responseCodeInvalidRequest = byte(0x01)
|
var responseCodeInvalidRequest = byte(0x01)
|
||||||
var responseCodeServerError = byte(0x02)
|
var responseCodeServerError = byte(0x02)
|
||||||
|
var responseCodeResourceUnavailable = byte(0x03)
|
||||||
|
|
||||||
func (s *Service) generateErrorResponse(code byte, reason string) ([]byte, error) {
|
func (s *Service) generateErrorResponse(code byte, reason string) ([]byte, error) {
|
||||||
return createErrorResponse(code, reason, s.cfg.p2p)
|
return createErrorResponse(code, reason, s.cfg.p2p)
|
||||||
|
|||||||
@@ -63,6 +63,9 @@ func (s *Service) registerForUpcomingFork(currEpoch primitives.Epoch) error {
|
|||||||
if nextEpoch == params.BeaconConfig().AltairForkEpoch {
|
if nextEpoch == params.BeaconConfig().AltairForkEpoch {
|
||||||
s.registerRPCHandlersAltair()
|
s.registerRPCHandlersAltair()
|
||||||
}
|
}
|
||||||
|
if nextEpoch == params.BeaconConfig().DenebForkEpoch {
|
||||||
|
s.registerRPCHandlersDeneb()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -304,6 +304,35 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
|
|||||||
return nil, "", errNoPeersAvailable
|
return nil, "", errNoPeersAvailable
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: need to fix the blobs by range rpc to bring this back
|
||||||
|
/*
|
||||||
|
// fetchBlobsFromPeer fetches blocks from a single randomly selected peer.
|
||||||
|
func (f *blocksFetcher) fetchBlobsFromPeer(
|
||||||
|
ctx context.Context,
|
||||||
|
start primitives.Slot, count uint64,
|
||||||
|
peers []peer.ID,
|
||||||
|
) ([]*p2ppb.BlobSidecar, peer.ID, error) {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlobsFromPeer")
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
peers = f.filterPeers(ctx, peers, peersPercentagePerRequest)
|
||||||
|
req := &p2ppb.BlobSidecarsByRangeRequest{
|
||||||
|
StartSlot: start,
|
||||||
|
Count: count,
|
||||||
|
}
|
||||||
|
for i := 0; i < len(peers); i++ {
|
||||||
|
blobs, err := f.requestBlobs(ctx, req, peers[i])
|
||||||
|
if err == nil {
|
||||||
|
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(peers[i])
|
||||||
|
return blobs, peers[i], err
|
||||||
|
} else {
|
||||||
|
log.WithError(err).Debug("Could not request blobs by range")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, "", errNoPeersAvailable
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
|
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
|
||||||
func (f *blocksFetcher) requestBlocks(
|
func (f *blocksFetcher) requestBlocks(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
@@ -334,6 +363,32 @@ func (f *blocksFetcher) requestBlocks(
|
|||||||
return prysmsync.SendBeaconBlocksByRangeRequest(ctx, f.chain, f.p2p, pid, req, nil)
|
return prysmsync.SendBeaconBlocksByRangeRequest(ctx, f.chain, f.p2p, pid, req, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
func (f *blocksFetcher) requestBlobs(ctx context.Context, req *p2ppb.BlobSidecarsByRangeRequest, pid peer.ID) ([]*p2ppb.BlobSidecar, error) {
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return nil, ctx.Err()
|
||||||
|
}
|
||||||
|
l := f.peerLock(pid)
|
||||||
|
l.Lock()
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"peer": pid,
|
||||||
|
"start": req.StartSlot,
|
||||||
|
"count": req.Count,
|
||||||
|
"capacity": f.rateLimiter.Remaining(pid.String()),
|
||||||
|
"score": f.p2p.Peers().Scorers().BlockProviderScorer().FormatScorePretty(pid),
|
||||||
|
}).Debug("Requesting blobs")
|
||||||
|
if f.rateLimiter.Remaining(pid.String()) < int64(req.Count) {
|
||||||
|
if err := f.waitForBandwidth(pid, req.Count); err != nil {
|
||||||
|
l.Unlock()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f.rateLimiter.Add(pid.String(), int64(req.Count))
|
||||||
|
l.Unlock()
|
||||||
|
return prysmsync.SendBlobsSidecarsByRangeRequest(ctx, f.chain, f.p2p, pid, req, nil)
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
// requestBlocksByRoot is a wrapper for handling BeaconBlockByRootsReq requests/streams.
|
// requestBlocksByRoot is a wrapper for handling BeaconBlockByRootsReq requests/streams.
|
||||||
func (f *blocksFetcher) requestBlocksByRoot(
|
func (f *blocksFetcher) requestBlocksByRoot(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
|
|||||||
@@ -81,6 +81,9 @@ func (s *Service) syncToFinalizedEpoch(ctx context.Context, genesis time.Time) e
|
|||||||
}
|
}
|
||||||
|
|
||||||
for data := range queue.fetchedData {
|
for data := range queue.fetchedData {
|
||||||
|
// If blobs are available. Verify blobs and blocks are consistence.
|
||||||
|
// We can't import a block if there's no associated blob within DA bound.
|
||||||
|
// The blob has to pass aggregated proof check.
|
||||||
s.processFetchedData(ctx, genesis, s.cfg.Chain.HeadSlot(), data)
|
s.processFetchedData(ctx, genesis, s.cfg.Chain.HeadSlot(), data)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -35,7 +35,7 @@ type blockchainService interface {
|
|||||||
// Config to set up the initial sync service.
|
// Config to set up the initial sync service.
|
||||||
type Config struct {
|
type Config struct {
|
||||||
P2P p2p.P2P
|
P2P p2p.P2P
|
||||||
DB db.ReadOnlyDatabase
|
DB db.NoHeadAccessDatabase
|
||||||
Chain blockchainService
|
Chain blockchainService
|
||||||
StateNotifier statefeed.Notifier
|
StateNotifier statefeed.Notifier
|
||||||
BlockNotifier blockfeed.Notifier
|
BlockNotifier blockfeed.Notifier
|
||||||
|
|||||||
@@ -82,6 +82,13 @@ var (
|
|||||||
Buckets: []float64{5, 10, 50, 100, 150, 250, 500, 1000, 2000},
|
Buckets: []float64{5, 10, 50, 100, 150, 250, 500, 1000, 2000},
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
rpcBlobsByRangeResponseLatency = promauto.NewHistogram(
|
||||||
|
prometheus.HistogramOpts{
|
||||||
|
Name: "rpc_blobs_by_range_response_latency_milliseconds",
|
||||||
|
Help: "Captures total time to respond to rpc BlobsByRange requests in a milliseconds distribution",
|
||||||
|
Buckets: []float64{5, 10, 50, 100, 150, 250, 500, 1000, 2000},
|
||||||
|
},
|
||||||
|
)
|
||||||
arrivalBlockPropagationHistogram = promauto.NewHistogram(
|
arrivalBlockPropagationHistogram = promauto.NewHistogram(
|
||||||
prometheus.HistogramOpts{
|
prometheus.HistogramOpts{
|
||||||
Name: "block_arrival_latency_milliseconds",
|
Name: "block_arrival_latency_milliseconds",
|
||||||
|
|||||||
@@ -9,7 +9,6 @@ import (
|
|||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/prysmaticlabs/prysm/v4/async"
|
"github.com/prysmaticlabs/prysm/v4/async"
|
||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
|
||||||
p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
|
p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
|
||||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||||
@@ -19,6 +18,7 @@ import (
|
|||||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/equality"
|
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/equality"
|
||||||
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
|
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/trailofbits/go-mutexasserts"
|
"github.com/trailofbits/go-mutexasserts"
|
||||||
@@ -159,21 +159,20 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
|||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.cfg.chain.ReceiveBlock(ctx, b, blkRoot); err != nil {
|
if b.Version() >= version.Deneb {
|
||||||
if blockchain.IsInvalidBlock(err) {
|
if err = s.blockAndBlobs.addBlock(b); err != nil {
|
||||||
r := blockchain.InvalidBlockRoot(err)
|
log.WithError(err).Error("Could not add block to block and blobs pool")
|
||||||
if r != [32]byte{} {
|
continue
|
||||||
s.setBadBlock(ctx, r) // Setting head block as bad.
|
}
|
||||||
} else {
|
if err = s.importBlockAndBlobs(ctx, blkRoot); err != nil {
|
||||||
s.setBadBlock(ctx, blkRoot)
|
log.WithError(err).Error("Could not import block and blobs")
|
||||||
}
|
continue
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err = s.receiveBlock(ctx, b, blkRoot); err != nil {
|
||||||
|
log.WithError(err).WithField("slot", b.Block().Slot()).Debug("Could not process block")
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
log.WithError(err).WithField("slot", b.Block().Slot()).Debug("Could not process block")
|
|
||||||
|
|
||||||
// In the next iteration of the queue, this block will be removed from
|
|
||||||
// the pending queue as it has been marked as a 'bad' block.
|
|
||||||
span.End()
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
|
|
||||||
s.setSeenBlockIndexSlot(b.Block().Slot(), b.Block().ProposerIndex())
|
s.setSeenBlockIndexSlot(b.Block().Slot(), b.Block().ProposerIndex())
|
||||||
@@ -182,10 +181,9 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
|||||||
pb, err := b.Proto()
|
pb, err := b.Proto()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithError(err).Debug("Could not get protobuf block")
|
log.WithError(err).Debug("Could not get protobuf block")
|
||||||
} else {
|
}
|
||||||
if err := s.cfg.p2p.Broadcast(ctx, pb); err != nil {
|
if err := s.cfg.p2p.Broadcast(ctx, pb); err != nil {
|
||||||
log.WithError(err).Debug("Could not broadcast block")
|
log.WithError(err).Debug("Could not broadcast block")
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
s.pendingQueueLock.Lock()
|
s.pendingQueueLock.Lock()
|
||||||
@@ -257,10 +255,11 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
|
|||||||
if len(roots) > int(params.BeaconNetworkConfig().MaxRequestBlocks) {
|
if len(roots) > int(params.BeaconNetworkConfig().MaxRequestBlocks) {
|
||||||
req = roots[:params.BeaconNetworkConfig().MaxRequestBlocks]
|
req = roots[:params.BeaconNetworkConfig().MaxRequestBlocks]
|
||||||
}
|
}
|
||||||
if err := s.sendRecentBeaconBlocksRequest(ctx, &req, pid); err != nil {
|
if err := s.sendRecentBeaconBlocksAndBlobsRequest(ctx, &req, pid); err != nil {
|
||||||
tracing.AnnotateError(span, err)
|
tracing.AnnotateError(span, err)
|
||||||
log.WithError(err).Debug("Could not send recent block request")
|
log.WithError(err).Debug("Could not send recent block request")
|
||||||
}
|
}
|
||||||
|
|
||||||
newRoots := make([][32]byte, 0, len(roots))
|
newRoots := make([][32]byte, 0, len(roots))
|
||||||
s.pendingQueueLock.RLock()
|
s.pendingQueueLock.RLock()
|
||||||
for _, rt := range roots {
|
for _, rt := range roots {
|
||||||
@@ -280,21 +279,35 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (_ *Service) dedupRoots(roots [][32]byte) [][32]byte {
|
||||||
|
newRoots := make([][32]byte, 0, len(roots))
|
||||||
|
rootMap := make(map[[32]byte]bool, len(roots))
|
||||||
|
for i, r := range roots {
|
||||||
|
if rootMap[r] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
rootMap[r] = true
|
||||||
|
newRoots = append(newRoots, roots[i])
|
||||||
|
}
|
||||||
|
return newRoots
|
||||||
|
}
|
||||||
|
|
||||||
func (s *Service) sortedPendingSlots() []primitives.Slot {
|
func (s *Service) sortedPendingSlots() []primitives.Slot {
|
||||||
s.pendingQueueLock.RLock()
|
s.pendingQueueLock.RLock()
|
||||||
defer s.pendingQueueLock.RUnlock()
|
defer s.pendingQueueLock.RUnlock()
|
||||||
|
|
||||||
items := s.slotToPendingBlocks.Items()
|
items := s.slotToPendingBlocks.Items()
|
||||||
|
|
||||||
ss := make([]primitives.Slot, 0, len(items))
|
ss1 := make([]primitives.Slot, 0, len(items))
|
||||||
for k := range items {
|
for k := range items {
|
||||||
slot := cacheKeyToSlot(k)
|
slot := cacheKeyToSlot(k)
|
||||||
ss = append(ss, slot)
|
ss1 = append(ss1, slot)
|
||||||
}
|
}
|
||||||
sort.Slice(ss, func(i, j int) bool {
|
sort.Slice(ss1, func(i, j int) bool {
|
||||||
return ss[i] < ss[j]
|
return ss1[i] < ss1[j]
|
||||||
})
|
})
|
||||||
return ss
|
|
||||||
|
return ss1
|
||||||
}
|
}
|
||||||
|
|
||||||
// validatePendingSlots validates the pending blocks
|
// validatePendingSlots validates the pending blocks
|
||||||
@@ -311,9 +324,11 @@ func (s *Service) validatePendingSlots() error {
|
|||||||
return errors.New("slotToPendingBlocks cache can't be nil")
|
return errors.New("slotToPendingBlocks cache can't be nil")
|
||||||
}
|
}
|
||||||
items := s.slotToPendingBlocks.Items()
|
items := s.slotToPendingBlocks.Items()
|
||||||
|
|
||||||
for k := range items {
|
for k := range items {
|
||||||
slot := cacheKeyToSlot(k)
|
slot := cacheKeyToSlot(k)
|
||||||
blks := s.pendingBlocksInCache(slot)
|
blks := s.pendingBlocksInCache(slot)
|
||||||
|
|
||||||
for _, b := range blks {
|
for _, b := range blks {
|
||||||
epoch := slots.ToEpoch(slot)
|
epoch := slots.ToEpoch(slot)
|
||||||
// remove all descendant blocks of old blocks
|
// remove all descendant blocks of old blocks
|
||||||
@@ -392,7 +407,9 @@ func (s *Service) deleteBlockFromPendingQueue(slot primitives.Slot, b interfaces
|
|||||||
if err := s.slotToPendingBlocks.Replace(slotToCacheKey(slot), newBlks, d); err != nil {
|
if err := s.slotToPendingBlocks.Replace(slotToCacheKey(slot), newBlks, d); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
delete(s.seenPendingBlocks, r)
|
delete(s.seenPendingBlocks, r)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -830,6 +830,7 @@ func TestService_ProcessBadPendingBlocks(t *testing.T) {
|
|||||||
bA, err := blocks.NewSignedBeaconBlock(b)
|
bA, err := blocks.NewSignedBeaconBlock(b)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// TODO: using the old deprecated type here because that's what the code expects, will get fixed with new sync queue
|
||||||
// Add block1 for slot 55
|
// Add block1 for slot 55
|
||||||
require.NoError(t, r.insertBlockToPendingQueue(b.Block.Slot, bA, b1Root))
|
require.NoError(t, r.insertBlockToPendingQueue(b.Block.Slot, bA, b1Root))
|
||||||
bB, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
bB, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ const defaultBurstLimit = 5
|
|||||||
const leakyBucketPeriod = 1 * time.Second
|
const leakyBucketPeriod = 1 * time.Second
|
||||||
|
|
||||||
// Only allow in 2 batches per minute.
|
// Only allow in 2 batches per minute.
|
||||||
const blockBucketPeriod = 30 * time.Second
|
const blockBucketPeriod = 5 * time.Second
|
||||||
|
|
||||||
// Dummy topic to validate all incoming rpc requests.
|
// Dummy topic to validate all incoming rpc requests.
|
||||||
const rpcLimiterTopic = "rpc-limiter-topic"
|
const rpcLimiterTopic = "rpc-limiter-topic"
|
||||||
@@ -59,6 +59,8 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
|
|||||||
// Collector for V2
|
// Collector for V2
|
||||||
blockCollectorV2 := leakybucket.NewCollector(allowedBlocksPerSecond, allowedBlocksBurst, blockBucketPeriod, false /* deleteEmptyBuckets */)
|
blockCollectorV2 := leakybucket.NewCollector(allowedBlocksPerSecond, allowedBlocksBurst, blockBucketPeriod, false /* deleteEmptyBuckets */)
|
||||||
|
|
||||||
|
blobCollector := leakybucket.NewCollector(allowedBlocksPerSecond, allowedBlocksBurst, blockBucketPeriod, false /* deleteEmptyBuckets */)
|
||||||
|
|
||||||
// BlocksByRoots requests
|
// BlocksByRoots requests
|
||||||
topicMap[addEncoding(p2p.RPCBlocksByRootTopicV1)] = blockCollector
|
topicMap[addEncoding(p2p.RPCBlocksByRootTopicV1)] = blockCollector
|
||||||
topicMap[addEncoding(p2p.RPCBlocksByRootTopicV2)] = blockCollectorV2
|
topicMap[addEncoding(p2p.RPCBlocksByRootTopicV2)] = blockCollectorV2
|
||||||
@@ -67,6 +69,10 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
|
|||||||
topicMap[addEncoding(p2p.RPCBlocksByRangeTopicV1)] = blockCollector
|
topicMap[addEncoding(p2p.RPCBlocksByRangeTopicV1)] = blockCollector
|
||||||
topicMap[addEncoding(p2p.RPCBlocksByRangeTopicV2)] = blockCollectorV2
|
topicMap[addEncoding(p2p.RPCBlocksByRangeTopicV2)] = blockCollectorV2
|
||||||
|
|
||||||
|
// BlobByRange and BlobByRoot
|
||||||
|
topicMap[addEncoding(p2p.RPCBlobSidecarsByRootTopicV1)] = blobCollector
|
||||||
|
topicMap[addEncoding(p2p.RPCBlobSidecarsByRangeTopicV1)] = blobCollector
|
||||||
|
|
||||||
// General topic for all rpc requests.
|
// General topic for all rpc requests.
|
||||||
topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)
|
topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)
|
||||||
|
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ import (
|
|||||||
|
|
||||||
func TestNewRateLimiter(t *testing.T) {
|
func TestNewRateLimiter(t *testing.T) {
|
||||||
rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
|
rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
|
||||||
assert.Equal(t, len(rlimiter.limiterMap), 10, "correct number of topics not registered")
|
assert.Equal(t, len(rlimiter.limiterMap), 12, "correct number of topics not registered")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewRateLimiter_FreeCorrectly(t *testing.T) {
|
func TestNewRateLimiter_FreeCorrectly(t *testing.T) {
|
||||||
|
|||||||
@@ -49,6 +49,9 @@ func (s *Service) registerRPCHandlers() {
|
|||||||
s.pingHandler,
|
s.pingHandler,
|
||||||
)
|
)
|
||||||
s.registerRPCHandlersAltair()
|
s.registerRPCHandlersAltair()
|
||||||
|
if currEpoch >= params.BeaconConfig().DenebForkEpoch {
|
||||||
|
s.registerRPCHandlersDeneb()
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
s.registerRPC(
|
s.registerRPC(
|
||||||
@@ -83,6 +86,7 @@ func (s *Service) registerRPCHandlersAltair() {
|
|||||||
p2p.RPCBlocksByRangeTopicV2,
|
p2p.RPCBlocksByRangeTopicV2,
|
||||||
s.beaconBlocksByRangeRPCHandler,
|
s.beaconBlocksByRangeRPCHandler,
|
||||||
)
|
)
|
||||||
|
// TODO(Deneb): Unregister this post Deneb fork epoch.
|
||||||
s.registerRPC(
|
s.registerRPC(
|
||||||
p2p.RPCBlocksByRootTopicV2,
|
p2p.RPCBlocksByRootTopicV2,
|
||||||
s.beaconBlocksRootRPCHandler,
|
s.beaconBlocksRootRPCHandler,
|
||||||
@@ -93,6 +97,17 @@ func (s *Service) registerRPCHandlersAltair() {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *Service) registerRPCHandlersDeneb() {
|
||||||
|
s.registerRPC(
|
||||||
|
p2p.RPCBlobSidecarsByRangeTopicV1,
|
||||||
|
s.blobSidecarsByRangeRPCHandler,
|
||||||
|
)
|
||||||
|
s.registerRPC(
|
||||||
|
p2p.RPCBlobSidecarsByRootTopicV1,
|
||||||
|
s.blobSidecarByRootRPCHandler,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
// Remove all v1 Stream handlers that are no longer supported
|
// Remove all v1 Stream handlers that are no longer supported
|
||||||
// from altair onwards.
|
// from altair onwards.
|
||||||
func (s *Service) unregisterPhase0Handlers() {
|
func (s *Service) unregisterPhase0Handlers() {
|
||||||
|
|||||||
@@ -79,7 +79,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if endSlot-startSlot > rangeLimit {
|
if endSlot-startSlot > primitives.Slot(rangeLimit) {
|
||||||
s.writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrInvalidRequest.Error(), stream)
|
s.writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrInvalidRequest.Error(), stream)
|
||||||
err := p2ptypes.ErrInvalidRequest
|
err := p2ptypes.ErrInvalidRequest
|
||||||
tracing.AnnotateError(span, err)
|
tracing.AnnotateError(span, err)
|
||||||
@@ -244,7 +244,7 @@ func (s *Service) validateRangeRequest(r *pb.BeaconBlocksByRangeRequest) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
endSlot := startSlot.Add(step * (count - 1))
|
endSlot := startSlot.Add(step * (count - 1))
|
||||||
if endSlot-startSlot > rangeLimit {
|
if endSlot-startSlot > primitives.Slot(rangeLimit) {
|
||||||
return p2ptypes.ErrInvalidRequest
|
return p2ptypes.ErrInvalidRequest
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@@ -291,10 +291,6 @@ func (s *Service) filterBlocks(ctx context.Context, blks []interfaces.ReadOnlySi
|
|||||||
return newBlks, nil
|
return newBlks, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) writeErrorResponseToStream(responseCode byte, reason string, stream libp2pcore.Stream) {
|
|
||||||
writeErrorResponseToStream(responseCode, reason, stream, s.cfg.p2p)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) retrieveGenesisBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, [32]byte, error) {
|
func (s *Service) retrieveGenesisBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, [32]byte, error) {
|
||||||
genBlock, err := s.cfg.beaconDB.GenesisBlock(ctx)
|
genBlock, err := s.cfg.beaconDB.GenesisBlock(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -11,14 +11,17 @@ import (
|
|||||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||||
|
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
// sendRecentBeaconBlocksRequest sends a recent beacon blocks request to a peer to get
|
// sendRecentBeaconBlocksAndBlobsRequest sends a recent beacon blocks request to a peer to get
|
||||||
// those corresponding blocks from that peer.
|
// those corresponding blocks from that peer.
|
||||||
func (s *Service) sendRecentBeaconBlocksRequest(ctx context.Context, blockRoots *types.BeaconBlockByRootsReq, id peer.ID) error {
|
func (s *Service) sendRecentBeaconBlocksAndBlobsRequest(ctx context.Context, blockRoots *types.BeaconBlockByRootsReq, id peer.ID) error {
|
||||||
ctx, cancel := context.WithTimeout(ctx, respTimeout)
|
ctx, cancel := context.WithTimeout(ctx, respTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
|
var requestBlobs map[[32]byte]int
|
||||||
_, err := SendBeaconBlocksByRootRequest(ctx, s.cfg.chain, s.cfg.p2p, id, blockRoots, func(blk interfaces.ReadOnlySignedBeaconBlock) error {
|
_, err := SendBeaconBlocksByRootRequest(ctx, s.cfg.chain, s.cfg.p2p, id, blockRoots, func(blk interfaces.ReadOnlySignedBeaconBlock) error {
|
||||||
blkRoot, err := blk.Block().HashTreeRoot()
|
blkRoot, err := blk.Block().HashTreeRoot()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -29,8 +32,44 @@ func (s *Service) sendRecentBeaconBlocksRequest(ctx context.Context, blockRoots
|
|||||||
if err := s.insertBlockToPendingQueue(blk.Block().Slot(), blk, blkRoot); err != nil {
|
if err := s.insertBlockToPendingQueue(blk.Block().Slot(), blk, blkRoot); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if blk.Version() >= version.Deneb {
|
||||||
|
kzgs, err := blk.Block().Body().BlobKzgCommitments()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
requestBlobs[blkRoot] = len(kzgs)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var reqs []*eth.BlobIdentifier
|
||||||
|
for root, count := range requestBlobs {
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
reqs = append(reqs, ð.BlobIdentifier{
|
||||||
|
BlockRoot: root[:],
|
||||||
|
Index: uint64(i),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(reqs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
blobs, err := SendBlobSidecarByRoot(ctx, s.cfg.chain, s.cfg.p2p, id, reqs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: validate blobs
|
||||||
|
for _, blob := range blobs {
|
||||||
|
if err = s.blockAndBlobs.addBlob(blob); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -244,7 +244,7 @@ func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
p1.Connect(p2)
|
p1.Connect(p2)
|
||||||
require.NoError(t, r.sendRecentBeaconBlocksRequest(context.Background(), &expectedRoots, p2.PeerID()))
|
require.NoError(t, r.sendRecentBeaconBlocksAndBlobsRequest(context.Background(), &expectedRoots, p2.PeerID()))
|
||||||
|
|
||||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||||
t.Fatal("Did not receive stream within 1 sec")
|
t.Fatal("Did not receive stream within 1 sec")
|
||||||
|
|||||||
178
beacon-chain/sync/rpc_blob_sidecars_by_range.go
Normal file
178
beacon-chain/sync/rpc_blob_sidecars_by_range.go
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||||
|
p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
|
||||||
|
pb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
type BlobSidecarProcessor func(sidecar *pb.BlobSidecar) error
|
||||||
|
|
||||||
|
func (s *Service) streamBlobBatch(ctx context.Context, batch blockBatch, stream libp2pcore.Stream, tw uint64) (uint64, error) {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "sync.streamBlobBatch")
|
||||||
|
defer span.End()
|
||||||
|
var writes uint64
|
||||||
|
for _, b := range batch.Sequence() {
|
||||||
|
root := b.Root()
|
||||||
|
scs, err := s.cfg.beaconDB.BlobSidecarsByRoot(ctx, b.Root())
|
||||||
|
if errors.Is(err, db.ErrNotFound) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||||
|
return writes, errors.Wrapf(err, "could not retrieve sidecars for block root %#x", root)
|
||||||
|
}
|
||||||
|
for _, sc := range scs {
|
||||||
|
SetStreamWriteDeadline(stream, defaultWriteDuration)
|
||||||
|
if chunkErr := WriteBlobSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
|
||||||
|
log.WithError(chunkErr).Debug("Could not send a chunked response")
|
||||||
|
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||||
|
tracing.AnnotateError(span, chunkErr)
|
||||||
|
return writes, chunkErr
|
||||||
|
}
|
||||||
|
s.rateLimiter.add(stream, 1)
|
||||||
|
writes += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return writes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// blobsSidecarsByRangeRPCHandler looks up the request blobs from the database from a given start slot index
|
||||||
|
func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "sync.BlobsSidecarsByRangeHandler")
|
||||||
|
defer span.End()
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, respTimeout)
|
||||||
|
defer cancel()
|
||||||
|
SetRPCStreamDeadlines(stream)
|
||||||
|
log := log.WithField("handler", p2p.BlobSidecarsByRangeName[1:]) // slice the leading slash off the name var
|
||||||
|
|
||||||
|
r, ok := msg.(*pb.BlobSidecarsByRangeRequest)
|
||||||
|
if !ok {
|
||||||
|
return errors.New("message is not type *pb.BlobsSidecarsByRangeRequest")
|
||||||
|
}
|
||||||
|
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
start, end, size, err := validateBlobsByRange(r, s.cfg.chain.CurrentSlot())
|
||||||
|
if err != nil {
|
||||||
|
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
|
||||||
|
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||||
|
tracing.AnnotateError(span, err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ticker to stagger out large requests.
|
||||||
|
ticker := time.NewTicker(time.Second)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
batcher := &blockRangeBatcher{
|
||||||
|
start: start,
|
||||||
|
end: end,
|
||||||
|
size: size,
|
||||||
|
db: s.cfg.beaconDB,
|
||||||
|
limiter: s.rateLimiter,
|
||||||
|
isCanonical: s.cfg.chain.IsCanonical,
|
||||||
|
ticker: ticker,
|
||||||
|
}
|
||||||
|
|
||||||
|
var batch blockBatch
|
||||||
|
var totalWrites uint64
|
||||||
|
for batch, ok = batcher.Next(ctx, stream); ok; batch, ok = batcher.Next(ctx, stream) {
|
||||||
|
batchStart := time.Now()
|
||||||
|
rpcBlobsByRangeResponseLatency.Observe(float64(time.Since(batchStart).Milliseconds()))
|
||||||
|
writes, err := s.streamBlobBatch(ctx, batch, stream, totalWrites)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
totalWrites += writes
|
||||||
|
// once we have written MAX_REQUEST_BLOB_SIDECARS, we're done serving the request
|
||||||
|
if totalWrites >= params.BeaconNetworkConfig().MaxRequestBlobsSidecars {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := batch.Err(); err != nil {
|
||||||
|
log.WithError(err).Debug("error in BlocksByRange batch")
|
||||||
|
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||||
|
tracing.AnnotateError(span, err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
closeStream(stream, log)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func blobsByRangeMinStartSlot(current primitives.Slot) (primitives.Slot, error) {
|
||||||
|
minReqEpochs := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
|
||||||
|
currEpoch := slots.ToEpoch(current)
|
||||||
|
minStart := params.BeaconConfig().DenebForkEpoch
|
||||||
|
if currEpoch > minReqEpochs && currEpoch-minReqEpochs > minStart {
|
||||||
|
minStart = currEpoch - minReqEpochs
|
||||||
|
}
|
||||||
|
return slots.EpochStart(minStart)
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateBlobsByRange normalizes a BlobSidecarsByRange request against the
// serving node's wall-clock slot, returning the clamped inclusive slot range
// and the batch size to use when streaming results.
// Returns:
//   - start: the requested start slot, raised to the minimum retention slot.
//   - end:   start+count-1, lowered to the current slot, never below start.
//   - batchSize: the requested count, capped by the local BlobBatchLimit flag
//     and the network's MaxRequestBlocksDeneb.
func validateBlobsByRange(r *pb.BlobSidecarsByRangeRequest, current primitives.Slot) (primitives.Slot, primitives.Slot, uint64, error) {
	start := r.StartSlot
	// A request beginning in the future yields the empty range (current, current, 0).
	if start > current {
		return current, current, 0, nil
	}
	count := r.Count

	// NOTE(review): when count == 0 the uint64 expression count-1 wraps to
	// MaxUint64 and SafeAdd reports overflow, so a zero-count request is
	// rejected as invalid rather than served as empty — confirm intended.
	end, err := start.SafeAdd((count - 1))
	if err != nil {
		return 0, 0, 0, errors.Wrap(p2ptypes.ErrInvalidRequest, "overflow start + count -1")
	}

	maxRequest := params.BeaconNetworkConfig().MaxRequestBlocksDeneb
	// Allow some wiggle room, up to double the MaxRequestBlocks past the current slot,
	// to give nodes syncing close to the head of the chain some margin for error.
	maxStart, err := current.SafeAdd(maxRequest * 2)
	if err != nil {
		return 0, 0, 0, errors.Wrap(p2ptypes.ErrInvalidRequest, "current + maxRequest * 2 > max uint")
	}

	// Clients MUST keep a record of signed blobs sidecars seen on the epoch range
	// [max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]
	// where current_epoch is defined by the current wall-clock time,
	// and clients MUST support serving requests of blobs on this range.
	minStartSlot, err := blobsByRangeMinStartSlot(current)
	if err != nil {
		return 0, 0, 0, errors.Wrap(p2ptypes.ErrInvalidRequest, "blobsByRangeMinStartSlot error")
	}
	// NOTE(review): defensive check — after the early return above,
	// start <= current <= maxStart, so this branch looks unreachable.
	if start > maxStart {
		return 0, 0, 0, errors.Wrap(p2ptypes.ErrInvalidRequest, "start > maxStart")
	}
	if start < minStartSlot {
		start = minStartSlot
	}

	// Clamp the inclusive end of the range into [start, current].
	if end > current {
		end = current
	}
	if end < start {
		end = start
	}

	// Batch size is the smallest of: requested count, local BlobBatchLimit,
	// and the network-wide MaxRequestBlocksDeneb.
	limit := uint64(flags.Get().BlobBatchLimit)
	if limit > maxRequest {
		limit = maxRequest
	}
	batchSize := count
	if batchSize > limit {
		batchSize = limit
	}

	return start, end, batchSize, nil
}
|
||||||
273
beacon-chain/sync/rpc_blob_sidecars_by_range_test.go
Normal file
273
beacon-chain/sync/rpc_blob_sidecars_by_range_test.go
Normal file
@@ -0,0 +1,273 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
|
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (c *blobsTestCase) defaultOldestSlotByRange(t *testing.T) types.Slot {
|
||||||
|
currentEpoch := slots.ToEpoch(c.chain.CurrentSlot())
|
||||||
|
oldestEpoch := currentEpoch - params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
|
||||||
|
if oldestEpoch < params.BeaconConfig().DenebForkEpoch {
|
||||||
|
oldestEpoch = params.BeaconConfig().DenebForkEpoch
|
||||||
|
}
|
||||||
|
oldestSlot, err := slots.EpochStart(oldestEpoch)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return oldestSlot
|
||||||
|
}
|
||||||
|
|
||||||
|
func blobRangeRequestFromSidecars(scs []*ethpb.BlobSidecar) interface{} {
|
||||||
|
maxBlobs := params.BeaconConfig().MaxBlobsPerBlock
|
||||||
|
count := uint64(len(scs)) / maxBlobs
|
||||||
|
return ðpb.BlobSidecarsByRangeRequest{
|
||||||
|
StartSlot: scs[0].Slot,
|
||||||
|
Count: count,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// filterExpectedByRange derives the chunks the server should stream for a
// by-range request: sidecars that fall inside both the retention window and
// the requested [StartSlot, StartSlot+Count-1] range, capped at
// MAX_REQUEST_BLOBS_SIDECARS total writes.
func (c *blobsTestCase) filterExpectedByRange(t *testing.T, scs []*ethpb.BlobSidecar, req interface{}) []*expectedBlobChunk {
	var expect []*expectedBlobChunk
	// blockOffset counts distinct blocks as the sidecars are scanned in order.
	blockOffset := 0
	lastRoot := bytesutil.ToBytes32(scs[0].BlockRoot)
	rreq, ok := req.(*ethpb.BlobSidecarsByRangeRequest)
	require.Equal(t, true, ok)
	var writes uint64
	for _, sc := range scs {
		root := bytesutil.ToBytes32(sc.BlockRoot)
		if root != lastRoot {
			blockOffset += 1
		}
		lastRoot = root

		// Sidecars older than the retention window are never served.
		if sc.Slot < c.oldestSlot(t) {
			continue
		}
		// Sidecars outside the requested slot range are not served.
		if sc.Slot < rreq.StartSlot || sc.Slot > rreq.StartSlot+types.Slot(rreq.Count)-1 {
			continue
		}
		// The server stops once it has written MAX_REQUEST_BLOBS_SIDECARS chunks.
		if writes == params.BeaconNetworkConfig().MaxRequestBlobsSidecars {
			continue
		}
		expect = append(expect, &expectedBlobChunk{
			sidecar: sc,
			code:    responseCodeSuccess,
			message: "",
		})
		writes += 1
	}
	return expect
}
|
||||||
|
|
||||||
|
func (c *blobsTestCase) runTestBlobSidecarsByRange(t *testing.T) {
|
||||||
|
if c.serverHandle == nil {
|
||||||
|
c.serverHandle = func(s *Service) rpcHandler { return s.blobSidecarsByRangeRPCHandler }
|
||||||
|
}
|
||||||
|
if c.defineExpected == nil {
|
||||||
|
c.defineExpected = c.filterExpectedByRange
|
||||||
|
}
|
||||||
|
if c.requestFromSidecars == nil {
|
||||||
|
c.requestFromSidecars = blobRangeRequestFromSidecars
|
||||||
|
}
|
||||||
|
if c.topic == "" {
|
||||||
|
c.topic = p2p.RPCBlobSidecarsByRangeTopicV1
|
||||||
|
}
|
||||||
|
if c.oldestSlot == nil {
|
||||||
|
c.oldestSlot = c.defaultOldestSlotByRange
|
||||||
|
}
|
||||||
|
c.run(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlobByRangeOK(t *testing.T) {
|
||||||
|
cases := []*blobsTestCase{
|
||||||
|
{
|
||||||
|
name: "beginning of window + 10",
|
||||||
|
nblocks: 10,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "10 slots before window, 10 slots after, count = 20",
|
||||||
|
nblocks: 10,
|
||||||
|
requestFromSidecars: func(scs []*ethpb.BlobSidecar) interface{} {
|
||||||
|
return ðpb.BlobSidecarsByRangeRequest{
|
||||||
|
StartSlot: scs[0].Slot - 10,
|
||||||
|
Count: 20,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "request before window, empty response",
|
||||||
|
nblocks: 10,
|
||||||
|
requestFromSidecars: func(scs []*ethpb.BlobSidecar) interface{} {
|
||||||
|
return ðpb.BlobSidecarsByRangeRequest{
|
||||||
|
StartSlot: scs[0].Slot - 10,
|
||||||
|
Count: 10,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
total: func() *int { x := 0; return &x }(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "request before window, empty response",
|
||||||
|
nblocks: 10,
|
||||||
|
requestFromSidecars: func(scs []*ethpb.BlobSidecar) interface{} {
|
||||||
|
return ðpb.BlobSidecarsByRangeRequest{
|
||||||
|
StartSlot: scs[0].Slot - 10,
|
||||||
|
Count: 20,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
total: func() *int { x := int(params.BeaconConfig().MaxBlobsPerBlock * 10); return &x }(), // 10 blocks * 4 blobs = 40
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "request before window, empty response",
|
||||||
|
nblocks: int(params.BeaconNetworkConfig().MaxRequestBlocksDeneb) + 10,
|
||||||
|
requestFromSidecars: func(scs []*ethpb.BlobSidecar) interface{} {
|
||||||
|
return ðpb.BlobSidecarsByRangeRequest{
|
||||||
|
StartSlot: scs[0].Slot,
|
||||||
|
Count: params.BeaconNetworkConfig().MaxRequestBlocksDeneb + 1,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
total: func() *int { x := int(params.BeaconNetworkConfig().MaxRequestBlobsSidecars); return &x }(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, c := range cases {
|
||||||
|
t.Run(c.name, func(t *testing.T) {
|
||||||
|
c.runTestBlobSidecarsByRange(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestBlobsByRangeValidation covers the clamping rules of validateBlobsByRange
// for request ranges around the wall-clock slot, the retention window, and the
// Deneb fork boundary.
func TestBlobsByRangeValidation(t *testing.T) {
	cfg := params.BeaconConfig()
	repositionFutureEpochs(cfg)
	undo, err := params.SetActiveWithUndo(cfg)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, undo())
	}()
	denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
	require.NoError(t, err)

	minReqEpochs := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
	minReqSlots, err := slots.EpochStart(minReqEpochs)
	require.NoError(t, err)
	// spec criteria for min,max bound checking
	/*
		Clients MUST keep a record of signed blobs sidecars seen on the epoch range
		[max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]
		where current_epoch is defined by the current wall-clock time,
		and clients MUST support serving requests of blobs on this range.
	*/
	// defaultCurrent sits 100 slots past the point where the retention window
	// no longer reaches back to the Deneb fork.
	defaultCurrent := denebSlot + 100 + minReqSlots
	defaultMinStart, err := blobsByRangeMinStartSlot(defaultCurrent)
	require.NoError(t, err)
	cases := []struct {
		name    string
		current types.Slot
		req     *ethpb.BlobSidecarsByRangeRequest

		// expected outputs of validateBlobsByRange
		start types.Slot
		end   types.Slot
		batch uint64
		err   error
	}{
		{
			name:    "start at current",
			current: denebSlot + 100,
			req: &ethpb.BlobSidecarsByRangeRequest{
				StartSlot: denebSlot + 100,
				Count:     10,
			},
			start: denebSlot + 100,
			end:   denebSlot + 100,
			batch: 10,
		},
		{
			name:    "start after current",
			current: denebSlot,
			req: &ethpb.BlobSidecarsByRangeRequest{
				StartSlot: denebSlot + 100,
				Count:     10,
			},
			start: denebSlot,
			end:   denebSlot,
			batch: 0,
		},
		{
			name:    "start before current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS",
			current: defaultCurrent,
			req: &ethpb.BlobSidecarsByRangeRequest{
				StartSlot: defaultMinStart - 100,
				Count:     10,
			},
			start: defaultMinStart,
			end:   defaultMinStart,
			batch: 10,
		},
		{
			name:    "start before current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - end still valid",
			current: defaultCurrent,
			req: &ethpb.BlobSidecarsByRangeRequest{
				StartSlot: defaultMinStart - 10,
				Count:     20,
			},
			start: defaultMinStart,
			end:   defaultMinStart + 9,
			batch: 20,
		},
		{
			name:    "count > MAX_REQUEST_BLOB_SIDECARS",
			current: defaultCurrent,
			req: &ethpb.BlobSidecarsByRangeRequest{
				StartSlot: defaultMinStart - 10,
				Count:     1000,
			},
			start: defaultMinStart,
			end:   defaultMinStart - 10 + 999,
			// a large count is ok, we just limit the amount of actual responses
			batch: uint64(flags.Get().BlobBatchLimit),
		},
		{
			name:    "start + count > current",
			current: defaultCurrent,
			req: &ethpb.BlobSidecarsByRangeRequest{
				StartSlot: defaultCurrent + 100,
				Count:     100,
			},
			start: defaultCurrent,
			end:   defaultCurrent,
			batch: 0,
		},
		{
			name:    "start before deneb",
			current: defaultCurrent - minReqSlots + 100,
			req: &ethpb.BlobSidecarsByRangeRequest{
				StartSlot: denebSlot - 10,
				Count:     100,
			},
			start: denebSlot,
			end:   denebSlot + 89,
			batch: 64,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			start, end, batch, err := validateBlobsByRange(c.req, c.current)
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
				return
			} else {
				require.NoError(t, err)
			}
			require.Equal(t, c.start, start)
			require.Equal(t, c.end, end)
			require.Equal(t, c.batch, batch)
		})
	}
}
|
||||||
84
beacon-chain/sync/rpc_blob_sidecars_by_root.go
Normal file
84
beacon-chain/sync/rpc_blob_sidecars_by_root.go
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
func blobMinReqEpoch(finalized, current primitives.Epoch) primitives.Epoch {
|
||||||
|
// max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)
|
||||||
|
denebFork := params.BeaconConfig().DenebForkEpoch
|
||||||
|
var reqWindow primitives.Epoch
|
||||||
|
if current > params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest {
|
||||||
|
reqWindow = current - params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
|
||||||
|
}
|
||||||
|
if finalized >= reqWindow && finalized > denebFork {
|
||||||
|
return finalized
|
||||||
|
}
|
||||||
|
if reqWindow >= finalized && reqWindow > denebFork {
|
||||||
|
return reqWindow
|
||||||
|
}
|
||||||
|
return denebFork
|
||||||
|
}
|
||||||
|
|
||||||
|
// blobSidecarByRootRPCHandler handles the /eth2/beacon_chain/req/blob_sidecars_by_root/1/ RPC request.
|
||||||
|
// spec: https://github.com/ethereum/consensus-specs/blob/a7e45db9ac2b60a33e144444969ad3ac0aae3d4c/specs/deneb/p2p-interface.md#blobsidecarsbyroot-v1
|
||||||
|
// blobSidecarByRootRPCHandler handles the /eth2/beacon_chain/req/blob_sidecars_by_root/1/ RPC request.
// For each requested (block_root, index) it streams the matching sidecar from
// the database, silently skipping unknown roots and out-of-range indices, and
// terminates the stream with ResourceUnavailable if a requested blob predates
// the minimum_request_epoch.
// spec: https://github.com/ethereum/consensus-specs/blob/a7e45db9ac2b60a33e144444969ad3ac0aae3d4c/specs/deneb/p2p-interface.md#blobsidecarsbyroot-v1
func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
	ctx, span := trace.StartSpan(ctx, "sync.blobSidecarByRootRPCHandler")
	defer span.End()
	ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
	defer cancel()
	SetRPCStreamDeadlines(stream)
	log := log.WithField("handler", p2p.BlobSidecarsByRootName[1:]) // slice the leading slash off the name var
	ref, ok := msg.(*types.BlobSidecarsByRootReq)
	if !ok {
		return errors.New("message is not type BlobSidecarsByRootReq")
	}
	// Blobs for blocks before this epoch may be refused per the spec's retention window.
	minReqEpoch := blobMinReqEpoch(s.cfg.chain.FinalizedCheckpt().Epoch, slots.ToEpoch(s.cfg.chain.CurrentSlot()))
	blobIdents := *ref
	for i := range blobIdents {
		root, idx := bytesutil.ToBytes32(blobIdents[i].BlockRoot), blobIdents[i].Index
		scs, err := s.cfg.beaconDB.BlobSidecarsByRoot(ctx, root)
		if err != nil {
			// Unknown roots are skipped; any other db error aborts the stream.
			if errors.Is(err, db.ErrNotFound) {
				continue
			}
			log.WithError(err).Debugf("error retrieving BlobSidecar, root=%x, index=%d", root, idx)
			s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
			return err
		}
		// NOTE(review): this treats scs as positionally indexed by sidecar Index —
		// confirm BlobSidecarsByRoot returns a dense, index-ordered slice.
		if idx >= uint64(len(scs)) {
			continue
		}
		sc := scs[idx]

		// If any root in the request content references a block earlier than minimum_request_epoch,
		// peers MAY respond with error code 3: ResourceUnavailable or not include the blob in the response.
		if slots.ToEpoch(sc.Slot) < minReqEpoch {
			s.writeErrorResponseToStream(responseCodeResourceUnavailable, types.ErrBlobLTMinRequest.Error(), stream)
			log.WithError(types.ErrBlobLTMinRequest).
				Debugf("requested blob for block %#x before minimum_request_epoch", blobIdents[i].BlockRoot)
			return types.ErrBlobLTMinRequest
		}
		SetStreamWriteDeadline(stream, defaultWriteDuration)
		if chunkErr := WriteBlobSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
			log.WithError(chunkErr).Debug("Could not send a chunked response")
			s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
			tracing.AnnotateError(span, chunkErr)
			return chunkErr
		}
		// Charge one unit of rate-limit quota per chunk written.
		s.rateLimiter.add(stream, 1)
	}
	return nil
}
|
||||||
245
beacon-chain/sync/rpc_blob_sidecars_by_root_test.go
Normal file
245
beacon-chain/sync/rpc_blob_sidecars_by_root_test.go
Normal file
@@ -0,0 +1,245 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||||
|
p2pTypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
|
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (c *blobsTestCase) defaultOldestSlotByRoot(t *testing.T) types.Slot {
|
||||||
|
oldest, err := slots.EpochStart(blobMinReqEpoch(c.chain.FinalizedCheckPoint.Epoch, slots.ToEpoch(c.chain.CurrentSlot())))
|
||||||
|
require.NoError(t, err)
|
||||||
|
return oldest
|
||||||
|
}
|
||||||
|
|
||||||
|
func blobRootRequestFromSidecars(scs []*ethpb.BlobSidecar) interface{} {
|
||||||
|
req := make(p2pTypes.BlobSidecarsByRootReq, 0)
|
||||||
|
for _, sc := range scs {
|
||||||
|
req = append(req, ðpb.BlobIdentifier{BlockRoot: sc.BlockRoot, Index: sc.Index})
|
||||||
|
}
|
||||||
|
return &req
|
||||||
|
}
|
||||||
|
|
||||||
|
// filterExpectedByRoot derives the chunks the server should stream for a
// by-root request: every stored sidecar in order, minus deliberately-missing
// indices, truncating the stream at the first "expired" block with a
// ResourceUnavailable error chunk.
func (c *blobsTestCase) filterExpectedByRoot(t *testing.T, scs []*ethpb.BlobSidecar, req interface{}) []*expectedBlobChunk {
	var expect []*expectedBlobChunk
	// blockOffset counts distinct blocks as the sidecars are scanned in order.
	blockOffset := 0
	if len(scs) == 0 {
		return expect
	}
	lastRoot := bytesutil.ToBytes32(scs[0].BlockRoot)
	for _, sc := range scs {
		root := bytesutil.ToBytes32(sc.BlockRoot)
		if root != lastRoot {
			blockOffset += 1
		}
		lastRoot = root

		// skip sidecars that are supposed to be missing
		if missed, ok := c.missing[blockOffset]; ok && missed[int(sc.Index)] {
			continue
		}
		// if a sidecar is expired, we'll expect an error for the *first* index, and after that
		// we'll expect no further chunks in the stream, so filter out any further expected responses.
		// we don't need to check what index this is because we work through them in order and the first one
		// will set streamTerminated = true and skip everything else in the test case.
		if c.expired[blockOffset] {
			return append(expect, &expectedBlobChunk{
				sidecar: sc,
				code:    responseCodeResourceUnavailable,
				message: p2pTypes.ErrBlobLTMinRequest.Error(),
			})
		}

		expect = append(expect, &expectedBlobChunk{
			sidecar: sc,
			code:    responseCodeSuccess,
			message: "",
		})
	}
	return expect
}
|
||||||
|
|
||||||
|
func (c *blobsTestCase) runTestBlobSidecarsByRoot(t *testing.T) {
|
||||||
|
if c.serverHandle == nil {
|
||||||
|
c.serverHandle = func(s *Service) rpcHandler { return s.blobSidecarByRootRPCHandler }
|
||||||
|
}
|
||||||
|
if c.defineExpected == nil {
|
||||||
|
c.defineExpected = c.filterExpectedByRoot
|
||||||
|
}
|
||||||
|
if c.requestFromSidecars == nil {
|
||||||
|
c.requestFromSidecars = blobRootRequestFromSidecars
|
||||||
|
}
|
||||||
|
if c.topic == "" {
|
||||||
|
c.topic = p2p.RPCBlobSidecarsByRootTopicV1
|
||||||
|
}
|
||||||
|
if c.oldestSlot == nil {
|
||||||
|
c.oldestSlot = c.defaultOldestSlotByRoot
|
||||||
|
}
|
||||||
|
c.run(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestBlobsByRootValidation covers minimum_request_epoch enforcement and
// missing-index handling in the by-root handler.
func TestBlobsByRootValidation(t *testing.T) {
	cfg := params.BeaconConfig()
	repositionFutureEpochs(cfg)
	undo, err := params.SetActiveWithUndo(cfg)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, undo())
	}()
	capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
	require.NoError(t, err)
	// Pin the mock chain to the Capella fork so that blobs in the "expired"
	// cases fall before the minimum_request_epoch.
	dmc := defaultMockChain(t)
	dmc.Slot = &capellaSlot
	dmc.FinalizedCheckPoint = &ethpb.Checkpoint{Epoch: params.BeaconConfig().CapellaForkEpoch}
	cases := []*blobsTestCase{
		{
			name:    "block before minimum_request_epoch",
			nblocks: 1,
			expired: map[int]bool{0: true},
			chain:   dmc,
			err:     p2pTypes.ErrBlobLTMinRequest,
		},
		{
			name:    "blocks before and after minimum_request_epoch",
			nblocks: 2,
			expired: map[int]bool{0: true},
			chain:   dmc,
			err:     p2pTypes.ErrBlobLTMinRequest,
		},
		{
			name:    "one after minimum_request_epoch then one before",
			nblocks: 2,
			expired: map[int]bool{1: true},
			chain:   dmc,
			err:     p2pTypes.ErrBlobLTMinRequest,
		},
		{
			name:    "one missing index, one after minimum_request_epoch then one before",
			nblocks: 3,
			missing: map[int]map[int]bool{0: map[int]bool{0: true}},
			expired: map[int]bool{1: true},
			chain:   dmc,
			err:     p2pTypes.ErrBlobLTMinRequest,
		},
		{
			name:    "2 missing indices from 2 different blocks",
			nblocks: 3,
			missing: map[int]map[int]bool{0: map[int]bool{0: true}, 2: map[int]bool{3: true}},
			total:   func(i int) *int { return &i }(3*int(params.BeaconConfig().MaxBlobsPerBlock) - 2), // aka 10
		},
		{
			name:    "all indices missing",
			nblocks: 1,
			missing: map[int]map[int]bool{0: map[int]bool{0: true, 1: true, 2: true, 3: true}},
			total:   func(i int) *int { return &i }(0),
		},
		{
			name:    "block with all indices missing between 2 full blocks",
			nblocks: 3,
			missing: map[int]map[int]bool{1: map[int]bool{0: true, 1: true, 2: true, 3: true}},
			total:   func(i int) *int { return &i }(8),
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			c.runTestBlobSidecarsByRoot(t)
		})
	}
}
|
||||||
|
|
||||||
|
func TestBlobsByRootOK(t *testing.T) {
|
||||||
|
cases := []*blobsTestCase{
|
||||||
|
{
|
||||||
|
name: "0 blob",
|
||||||
|
nblocks: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "1 blob",
|
||||||
|
nblocks: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "2 blob",
|
||||||
|
nblocks: 2,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, c := range cases {
|
||||||
|
t.Run(c.name, func(t *testing.T) {
|
||||||
|
c.runTestBlobSidecarsByRoot(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestBlobsByRootMinReqEpoch checks that blobMinReqEpoch returns
// max(finalized, current - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)
// without underflowing when the chain is younger than the retention window.
func TestBlobsByRootMinReqEpoch(t *testing.T) {
	winMin := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
	cases := []struct {
		name      string
		finalized types.Epoch
		current   types.Epoch
		deneb     types.Epoch
		expected  types.Epoch
	}{
		{
			name:      "testnet genesis",
			deneb:     100,
			current:   0,
			finalized: 0,
			expected:  100,
		},
		{
			name:      "underflow averted",
			deneb:     100,
			current:   winMin - 1,
			finalized: 0,
			expected:  100,
		},
		{
			name:      "underflow averted - finalized is higher",
			deneb:     100,
			current:   winMin - 1,
			finalized: winMin - 2,
			expected:  winMin - 2,
		},
		{
			name:      "underflow averted - genesis at deneb",
			deneb:     0,
			current:   winMin - 1,
			finalized: 0,
			expected:  0,
		},
		{
			name:      "max is finalized",
			deneb:     100,
			current:   99 + winMin,
			finalized: 101,
			expected:  101,
		},
		{
			name:      "reqWindow > finalized, reqWindow < deneb",
			deneb:     100,
			current:   99 + winMin,
			finalized: 98,
			expected:  100,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			// Each subtest installs its own Deneb fork epoch and restores the
			// previous config when the subtest body returns.
			cfg := params.BeaconConfig()
			repositionFutureEpochs(cfg)
			cfg.DenebForkEpoch = c.deneb
			undo, err := params.SetActiveWithUndo(cfg)
			require.NoError(t, err)
			defer func() {
				require.NoError(t, undo())
			}()
			ep := blobMinReqEpoch(c.finalized, c.current)
			require.Equal(t, c.expected, ep)
		})
	}
}
|
||||||
@@ -12,7 +12,9 @@ import (
|
|||||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
"github.com/prysmaticlabs/prysm/v4/network/forks"
|
"github.com/prysmaticlabs/prysm/v4/network/forks"
|
||||||
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||||
)
|
)
|
||||||
|
|
||||||
// chunkBlockWriter writes the given message as a chunked response to the given network
|
// chunkBlockWriter writes the given message as a chunked response to the given network
|
||||||
@@ -53,6 +55,20 @@ func WriteBlockChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
obtainedCtx = digest[:]
|
obtainedCtx = digest[:]
|
||||||
|
case version.Capella:
|
||||||
|
valRoot := chain.GenesisValidatorsRoot()
|
||||||
|
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, valRoot[:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
obtainedCtx = digest[:]
|
||||||
|
case version.Deneb:
|
||||||
|
valRoot := chain.GenesisValidatorsRoot()
|
||||||
|
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, valRoot[:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
obtainedCtx = digest[:]
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := writeContextToStream(obtainedCtx, stream, chain); err != nil {
|
if err := writeContextToStream(obtainedCtx, stream, chain); err != nil {
|
||||||
@@ -73,6 +89,25 @@ func ReadChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2
|
|||||||
return readResponseChunk(stream, chain, p2p)
|
return readResponseChunk(stream, chain, p2p)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WriteBlobSidecarChunk writes blob chunk object to stream.
|
||||||
|
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
|
||||||
|
func WriteBlobSidecarChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, encoding encoder.NetworkEncoding, sidecar *ethpb.BlobSidecar) error {
|
||||||
|
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
valRoot := chain.GenesisValidatorsRoot()
|
||||||
|
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(sidecar.GetSlot()), valRoot[:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := writeContextToStream(ctxBytes[:], stream, chain); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = encoding.EncodeWithMaxLength(stream, sidecar)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// readFirstChunkedBlock reads the first chunked block and applies the appropriate deadlines to
|
// readFirstChunkedBlock reads the first chunked block and applies the appropriate deadlines to
|
||||||
// it.
|
// it.
|
||||||
func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2p p2p.EncodingProvider) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2p p2p.EncodingProvider) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||||
|
|||||||
56
beacon-chain/sync/rpc_handler_test.go
Normal file
56
beacon-chain/sync/rpc_handler_test.go
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/libp2p/go-libp2p/core/network"
|
||||||
|
"github.com/libp2p/go-libp2p/core/protocol"
|
||||||
|
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// rpcHandlerTest bundles the inputs needed to exercise one RPC handler
// against a connected pair of in-memory libp2p hosts.
type rpcHandlerTest struct {
	t       *testing.T
	topic   protocol.ID   // protocol id the client dials and the server handles
	timeout time.Duration // overall deadline for the handler exchange
	err     error         // expected handler error; nil means expect success
	s       *Service      // service under test; supplies the p2p client and handler
}
|
||||||
|
|
||||||
|
// testHandler connects the service's p2p client to a fresh server host, opens
// a stream on rt.topic, invokes the rpcHandler rh directly with request rhi,
// and verifies both the handler's returned error and that the server-side
// stream handler nh ran before the timeout.
func (rt *rpcHandlerTest) testHandler(nh network.StreamHandler, rh rpcHandler, rhi interface{}) {
	ctx, cancel := context.WithTimeout(context.Background(), rt.timeout)
	defer func() {
		cancel()
	}()

	w := util.NewWaiter()
	server := p2ptest.NewTestP2P(rt.t)

	// The service must have been constructed with a TestP2P so we can connect
	// it to the in-memory server host.
	client, ok := rt.s.cfg.p2p.(*p2ptest.TestP2P)
	require.Equal(rt.t, true, ok)

	client.Connect(server)
	defer func() {
		require.NoError(rt.t, client.Disconnect(server.PeerID()))
	}()
	require.Equal(rt.t, 1, len(client.BHost.Network().Peers()), "Expected peers to be connected")
	// Wrap the provided stream handler so the waiter is released when it returns.
	h := func(stream network.Stream) {
		defer w.Done()
		nh(stream)
	}
	server.BHost.SetStreamHandler(protocol.ID(rt.topic), h)
	stream, err := client.BHost.NewStream(ctx, server.BHost.ID(), protocol.ID(rt.topic))
	require.NoError(rt.t, err)

	// Drive the handler directly rather than through the wire decoder.
	err = rh(ctx, rhi, stream)
	if rt.err == nil {
		require.NoError(rt.t, err)
	} else {
		require.ErrorIs(rt.t, err, rt.err)
	}

	// Fail if the server-side handler did not complete before the deadline.
	w.RequireDoneBeforeCancel(rt.t, ctx)
}
|
||||||
@@ -129,3 +129,30 @@ func SendBeaconBlocksByRootRequest(
|
|||||||
}
|
}
|
||||||
return blocks, nil
|
return blocks, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SendBlobSidecarByRoot sends a BlobSidecarsByRoot request to the given peer
// over the fork-versioned topic for the current epoch.
// NOTE(review): reading the response chunks is not implemented yet (see the
// TODO below); as written this always returns an empty slice on success.
func SendBlobSidecarByRoot(
	ctx context.Context, ci blockchain.ChainInfoFetcher, p2pApi p2p.P2P, pid peer.ID,
	req p2ptypes.BlobSidecarsByRootReq,
) ([]*pb.BlobSidecar, error) {
	topic, err := p2p.TopicFromMessage(p2p.BlobSidecarsByRootName, slots.ToEpoch(ci.CurrentSlot()))
	if err != nil {
		return nil, err
	}
	stream, err := p2pApi.Send(ctx, req, topic, pid)
	if err != nil {
		return nil, err
	}
	defer closeStream(stream, log)

	sidecars := make([]*pb.BlobSidecar, 0, len(req))

	// Upper bound on chunks a well-behaved peer may send for this request.
	max := params.BeaconNetworkConfig().MaxRequestBlobsSidecars * params.BeaconConfig().MaxBlobsPerBlock
	for i := 0; i < len(req); i++ {
		// Exit if peer sends more than MAX_REQUEST_BLOBS_SIDECARS.
		if uint64(i) >= max {
			break
		}
		// TODO: Read sidecar
	}
	return sidecars, nil
}
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
|
|
||||||
lru "github.com/hashicorp/golang-lru"
|
lru "github.com/hashicorp/golang-lru"
|
||||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||||
|
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||||
"github.com/libp2p/go-libp2p/core/peer"
|
"github.com/libp2p/go-libp2p/core/peer"
|
||||||
"github.com/libp2p/go-libp2p/core/protocol"
|
"github.com/libp2p/go-libp2p/core/protocol"
|
||||||
gcache "github.com/patrickmn/go-cache"
|
gcache "github.com/patrickmn/go-cache"
|
||||||
@@ -34,6 +35,7 @@ import (
|
|||||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
|
||||||
lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru"
|
lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru"
|
||||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
|
leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket"
|
||||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
"github.com/prysmaticlabs/prysm/v4/runtime"
|
"github.com/prysmaticlabs/prysm/v4/runtime"
|
||||||
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
|
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
|
||||||
@@ -42,7 +44,7 @@ import (
|
|||||||
|
|
||||||
var _ runtime.Service = (*Service)(nil)
|
var _ runtime.Service = (*Service)(nil)
|
||||||
|
|
||||||
const rangeLimit = 1024
|
const rangeLimit uint64 = 1024
|
||||||
const seenBlockSize = 1000
|
const seenBlockSize = 1000
|
||||||
const seenUnaggregatedAttSize = 20000
|
const seenUnaggregatedAttSize = 20000
|
||||||
const seenAggregatedAttSize = 1024
|
const seenAggregatedAttSize = 1024
|
||||||
@@ -139,6 +141,7 @@ type Service struct {
|
|||||||
syncContributionBitsOverlapLock sync.RWMutex
|
syncContributionBitsOverlapLock sync.RWMutex
|
||||||
syncContributionBitsOverlapCache *lru.Cache
|
syncContributionBitsOverlapCache *lru.Cache
|
||||||
signatureChan chan *signatureVerifier
|
signatureChan chan *signatureVerifier
|
||||||
|
blockAndBlobs *blockAndBlocksQueue
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewService initializes new regular sync service.
|
// NewService initializes new regular sync service.
|
||||||
@@ -154,6 +157,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
|
|||||||
seenPendingBlocks: make(map[[32]byte]bool),
|
seenPendingBlocks: make(map[[32]byte]bool),
|
||||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||||
|
blockAndBlobs: newBlockAndBlobs(),
|
||||||
}
|
}
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
if err := opt(r); err != nil {
|
if err := opt(r); err != nil {
|
||||||
@@ -287,6 +291,14 @@ func (s *Service) registerHandlers() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *Service) writeErrorResponseToStream(responseCode byte, reason string, stream libp2pcore.Stream) {
|
||||||
|
writeErrorResponseToStream(responseCode, reason, stream, s.cfg.p2p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) setRateCollector(topic string, c *leakybucket.Collector) {
|
||||||
|
s.rateLimiter.limiterMap[topic] = c
|
||||||
|
}
|
||||||
|
|
||||||
// marks the chain as having started.
|
// marks the chain as having started.
|
||||||
func (s *Service) markForChainStart() {
|
func (s *Service) markForChainStart() {
|
||||||
s.chainStarted.Set()
|
s.chainStarted.Set()
|
||||||
|
|||||||
@@ -87,6 +87,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
|
|||||||
s.validateCommitteeIndexBeaconAttestation, /* validator */
|
s.validateCommitteeIndexBeaconAttestation, /* validator */
|
||||||
s.committeeIndexBeaconAttestationSubscriber, /* message handler */
|
s.committeeIndexBeaconAttestationSubscriber, /* message handler */
|
||||||
digest,
|
digest,
|
||||||
|
params.BeaconNetworkConfig().AttestationSubnetCount,
|
||||||
)
|
)
|
||||||
} else {
|
} else {
|
||||||
s.subscribeDynamicWithSubnets(
|
s.subscribeDynamicWithSubnets(
|
||||||
@@ -130,6 +131,17 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
|
|||||||
digest,
|
digest,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// New Gossip Topic in Deneb
|
||||||
|
if epoch >= params.BeaconConfig().DenebForkEpoch {
|
||||||
|
s.subscribeStaticWithSubnets(
|
||||||
|
p2p.BlobSubnetTopicFormat,
|
||||||
|
s.validateBlob, /* validator */
|
||||||
|
s.blobSubscriber, /* message handler */
|
||||||
|
digest,
|
||||||
|
params.BeaconNetworkConfig().BlobSubnetCount,
|
||||||
|
)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// subscribe to a given topic with a given validator and subscription handler.
|
// subscribe to a given topic with a given validator and subscription handler.
|
||||||
@@ -300,7 +312,7 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
|
|||||||
|
|
||||||
// subscribe to a static subnet with the given topic and index.A given validator and subscription handler is
|
// subscribe to a static subnet with the given topic and index.A given validator and subscription handler is
|
||||||
// used to handle messages from the subnet. The base protobuf message is used to initialize new messages for decoding.
|
// used to handle messages from the subnet. The base protobuf message is used to initialize new messages for decoding.
|
||||||
func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal, handle subHandler, digest [4]byte) {
|
func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal, handle subHandler, digest [4]byte, subnetCount uint64) {
|
||||||
genRoot := s.cfg.chain.GenesisValidatorsRoot()
|
genRoot := s.cfg.chain.GenesisValidatorsRoot()
|
||||||
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
|
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -312,7 +324,7 @@ func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal,
|
|||||||
// Impossible condition as it would mean topic does not exist.
|
// Impossible condition as it would mean topic does not exist.
|
||||||
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic))
|
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic))
|
||||||
}
|
}
|
||||||
for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
|
for i := uint64(0); i < subnetCount; i++ {
|
||||||
s.subscribeWithBase(s.addDigestAndIndexToTopic(topic, digest, i), validator, handle)
|
s.subscribeWithBase(s.addDigestAndIndexToTopic(topic, digest, i), validator, handle)
|
||||||
}
|
}
|
||||||
genesis := s.cfg.chain.GenesisTime()
|
genesis := s.cfg.chain.GenesisTime()
|
||||||
@@ -336,7 +348,7 @@ func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal,
|
|||||||
if !valid {
|
if !valid {
|
||||||
log.Warnf("Attestation subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
|
log.Warnf("Attestation subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
|
||||||
// Unsubscribes from all our current subnets.
|
// Unsubscribes from all our current subnets.
|
||||||
for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
|
for i := uint64(0); i < subnetCount; i++ {
|
||||||
fullTopic := fmt.Sprintf(topic, digest, i) + s.cfg.p2p.Encoding().ProtocolSuffix()
|
fullTopic := fmt.Sprintf(topic, digest, i) + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||||
s.unSubscribeFromTopic(fullTopic)
|
s.unSubscribeFromTopic(fullTopic)
|
||||||
}
|
}
|
||||||
@@ -344,7 +356,7 @@ func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal,
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Check every slot that there are enough peers
|
// Check every slot that there are enough peers
|
||||||
for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
|
for i := uint64(0); i < subnetCount; i++ {
|
||||||
if !s.validPeersExist(s.addDigestAndIndexToTopic(topic, digest, i)) {
|
if !s.validPeersExist(s.addDigestAndIndexToTopic(topic, digest, i)) {
|
||||||
log.Debugf("No peers found subscribed to attestation gossip subnet with "+
|
log.Debugf("No peers found subscribed to attestation gossip subnet with "+
|
||||||
"committee index %d. Searching network for peers subscribed to the subnet.", i)
|
"committee index %d. Searching network for peers subscribed to the subnet.", i)
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user