Compare commits


42 Commits

Author SHA1 Message Date
nisdas
79657b158c Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into hackSync 2025-03-05 13:33:53 +08:00
Potuz
a7b016c954 Add Target root to forkchoice dump (#15009)
* Add Target root to forkchoice dump

* review
2025-03-05 04:49:44 +00:00
Manu NALEPA
6015493de9 --validators-registration-batch-size: Change default value from 0 to 200. (#14981) 2025-03-04 16:32:24 +00:00
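
A minimal sketch of what a registration batch size governs, assuming (this is an assumption, not stated in the commit) that a size of 0 previously meant "submit everything in one request". Registration and splitBatches are hypothetical names, not Prysm's API:

package sketch

// Registration stands in for a validator registration message; the
// batching logic is the only point of this sketch.
type Registration struct{ Pubkey string }

// splitBatches chunks regs into groups of at most batchSize
// (e.g. the new default of 200).
func splitBatches(regs []Registration, batchSize int) [][]Registration {
	if batchSize <= 0 {
		// Assumed legacy behavior: no batching.
		return [][]Registration{regs}
	}
	var out [][]Registration
	for len(regs) > batchSize {
		out = append(out, regs[:batchSize])
		regs = regs[batchSize:]
	}
	return append(out, regs)
}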
Dhruv Bodani
c718bdbe2b fix pruner timing issue with batch pruning (#14929)
* fix pruner timing issue with batch pruning

* add changelog entry

* add batchSize and number of slots deleted to debug logs

* fix lint

* prune in small batches in one prune cycle

* remove noisy logs

* fix number of batches to prune and return early if there's nothing to delete

* use context with timeout

* fix lint by ignoring nil err return
2025-03-04 16:31:17 +00:00
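
A minimal sketch of the batched-pruning pattern this commit describes: prune in small batches within one cycle, bound the whole cycle with a context timeout, and return early when a batch deletes nothing. pruneBatch and the timeout value are hypothetical stand-ins, not Prysm's API:

package sketch

import (
	"context"
	"time"
)

// pruneBatch is a hypothetical stand-in for a store method that deletes
// up to batchSize stale entries and reports how many were removed.
func pruneBatch(ctx context.Context, batchSize int) (int, error) { return 0, nil }

// pruneCycle deletes in small batches so one cycle never holds the
// database for long, and gives up cleanly when the timeout expires.
func pruneCycle(parent context.Context, batchSize, maxBatches int) error {
	ctx, cancel := context.WithTimeout(parent, 30*time.Second)
	defer cancel()
	for i := 0; i < maxBatches; i++ {
		n, err := pruneBatch(ctx, batchSize)
		if err != nil {
			return err
		}
		if n == 0 {
			return nil // nothing left to delete: return early
		}
	}
	return nil
}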
Preston Van Loon
0a8f947169 beacon-chain: Reorganize flags in help text (#14959)
* Beacon flags: Comment on deprecated section

* Beacon flags: Organize flags relevant to logging, comment on logging section

* Beacon flags: Organize flags relevant to p2p, comment on p2p section

* Beacon flags: Introduce db flag section, organize flags relevant to db, comment on db section

* Beacon flags: Introduce builder flag section, organize flags relevant to builder, comment on builder section

* Beacon flags: Introduce sync flag section, organize flags relevant to sync, comment on sync section

* Beacon flags: Introduce execution layer flag section, organize flags relevant to execution layer, comment on execution layer section

* Beacon flags: Introduce monitoring flag section, organize flags relevant to monitoring, comment on monitoring section

* Beacon flags: Organizing remaining flags in cmd and beacon-chain sections

* Beacon flags: Introduce slasher flag section, organize flags relevant to slasher, comment on slasher section

* Move slasher flag from features to the slasher section

* Changelog fragment

* Beacon flags: Reorganize sections

* Move MaxGoroutines to debug section
2025-03-04 16:17:29 +00:00
james-prysm
d7efccf6a5 single attestation cleanup (#14984)
* some cleanup and minor bug fix

* adding some comments back in

* Update beacon-chain/sync/pending_attestations_queue.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/sync/pending_attestations_queue.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/sync/validate_beacon_attestation.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/sync/validate_beacon_attestation.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/sync/validate_beacon_attestation.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/sync/validate_beacon_attestation.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/sync/validate_beacon_attestation.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/sync/pending_attestations_queue.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* adding comment back in

* linting

* fixing committeeIndiciesSLot

* fixing changelog

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-03-04 14:50:59 +00:00
Nishant Das
334920bc9e Fix Block Decoding During State Deletion (#15008)
* Fix Block Decoding

* Changelog
2025-03-04 11:55:32 +00:00
james-prysm
6e00db433c cache uses wrong seen committee index for electra (#14998)
* adding fix and unit tests

* removing unneeded test for now, will handle in separate PR
2025-03-03 16:53:42 +00:00
james-prysm
c6344e7c3e fixing electra committee logs (#14992)
* fixing logs

* fixing tests

* addressing feedback

* fixing tests based on feedback
2025-03-03 15:35:45 +00:00
nisdas
2c6e028600 Add a Hack 2025-03-03 08:52:20 +08:00
Potuz
a9dc6a1dbb missing save 2025-03-02 16:48:38 -03:00
Nishant Das
713fd33eb5 Fix Bugs In Sync From Head (#15006)
* Fix Bugs

* Remove log
2025-03-02 16:48:16 -03:00
nisdas
5f56507bee Merge branch 'sync_from_head' of https://github.com/prysmaticlabs/geth-sharding into hackSync 2025-03-01 07:39:52 +08:00
rkapka
07f0d5ee72 delete on error 2025-02-28 18:16:30 +01:00
rkapka
23bbf2380f Ignore errors from hasSeenBit and don't pack unaggregated attestations 2025-02-28 18:11:47 +01:00
Potuz
9e96de033b Add feature flag to start from any beacon block in db
The new feature flag --sync-from takes a string with one of the following
values:

- `head` or
- a 0x-prefixed, hex-encoded beacon block root.

The given beacon block root (or the head block root) must be known in the db
and must be a descendant of the current justified checkpoint.
2025-02-28 10:47:43 -03:00
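
A minimal sketch, assuming nothing beyond the description above, of how such a value can be validated (illustrative only; not Prysm's actual flag parser):

package sketch

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// parseSyncFrom validates a value of the shape described above: the
// literal "head", or a 0x-prefixed, 32-byte hex-encoded block root.
func parseSyncFrom(v string) (root [32]byte, isHead bool, err error) {
	if v == "head" {
		return root, true, nil
	}
	if !strings.HasPrefix(v, "0x") {
		return root, false, fmt.Errorf("expected %q or a 0x-prefixed root, got %q", "head", v)
	}
	b, err := hex.DecodeString(v[2:])
	if err != nil {
		return root, false, err
	}
	if len(b) != 32 {
		return root, false, fmt.Errorf("root must be 32 bytes, got %d", len(b))
	}
	copy(root[:], b)
	return root, false, nil
}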
Potuz
2131254722 Populate pubkey cache at genesis (#14995)
When starting with a zero-root finalized checkpoint, instead of returning
early, also populate the pubkey cache.
2025-02-28 11:12:52 +00:00
Potuz
b6d1866deb Split out forkchoice setup from service start (#14997) 2025-02-27 20:38:29 +00:00
james-prysm
e56f489d06 add log for committee index on electra attestation (#14993)
* adding log in the case of debugging

* gaz
2025-02-27 18:11:27 +00:00
Nishant Das
bf62afb27c Fix Gossip Validation of Electra Attester Slashings (#14985)
* Fix Attester Slashing Validation In Electra

* Changelog
2025-02-27 08:50:24 +00:00
nisdas
fd41691178 Revert Fallback Sync 2025-02-27 12:11:12 +08:00
nisdas
5916c6e625 Fix Attester Slashing Validation In Electra 2025-02-26 15:09:59 +08:00
terence tsao
0312cb223a Move blockRoot == badHoleskyRoot earlier 2025-02-25 13:53:24 -08:00
terence tsao
68cf7a59f2 Update BUILD.bazel 2025-02-25 12:46:40 -08:00
Kasey Kirkham
537ddb1a24 reject bad root in receive block(batch) 2025-02-25 14:44:18 -06:00
Kasey Kirkham
7afaa6994b hardcode root to remove cache eviction possibility 2025-02-25 14:37:50 -06:00
terence tsao
defb3ab87b Revert terence's downscore change 2025-02-25 11:45:56 -08:00
terence tsao
81dce25c98 Gazelle 2025-02-25 11:41:28 -08:00
Kasey Kirkham
5c409b90bc hack to downscore peers with bad holesky root 2025-02-25 13:40:33 -06:00
terence tsao
aa47661602 Downscore 2025-02-25 11:29:53 -08:00
potuz
7259a2b983 blacklist on batches too 2025-02-25 16:16:53 -03:00
terence tsao
e9f511ac00 Update bootnodes 2025-02-25 08:39:17 -08:00
potuz
34a68715b8 blacklist bad block 2025-02-25 11:40:35 -03:00
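
The hot-fix pattern running through these commits, sketched standalone: hardcode the known-bad root (so cache eviction cannot forget it) and reject any block carrying it early in processing. The root value and names below are placeholders, not the real ones:

package sketch

import "errors"

// badHoleskyRoot is a placeholder for the hardcoded bad block root;
// the real value lives in the commits above.
var badHoleskyRoot = [32]byte{0x01}

var errBadRoot = errors.New("known bad block root")

// checkRoot rejects the blacklisted root before any further processing,
// the same early-return shape as "Move blockRoot == badHoleskyRoot earlier".
func checkRoot(blockRoot [32]byte) error {
	if blockRoot == badHoleskyRoot {
		return errBadRoot
	}
	return nil
}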
Bastin
8369056027 SSZ Support for LC finality and optimistic update APIs (#14836)
* bundle handlers test

* ssz support for optimistic and finality updates APIs

* changelog PR link

* delete helpers

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-02-25 13:09:28 +00:00
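
Serving both encodings for an update endpoint usually comes down to content negotiation on the Accept header; a generic net/http sketch of the idea (not the actual Prysm handler):

package sketch

import "net/http"

// writeUpdate answers with SSZ bytes when the client asks for
// application/octet-stream, and with JSON otherwise. sszBytes and
// jsonBytes are assumed to be pre-encoded by the caller.
func writeUpdate(w http.ResponseWriter, r *http.Request, sszBytes, jsonBytes []byte) {
	if r.Header.Get("Accept") == "application/octet-stream" {
		w.Header().Set("Content-Type", "application/octet-stream")
		_, _ = w.Write(sszBytes)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	_, _ = w.Write(jsonBytes)
}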
nisdas
c33e0575ab do not resync 2025-02-25 17:32:16 +08:00
Preston Van Loon
09499a732f gosec: Fix violations of G301 (#14980)
* gosec: Fix violations of G301

* Changelog fragment
2025-02-24 15:13:53 +00:00
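
gosec rule G301 flags directories created with permissions broader than 0750; the usual fix is tightening the mode, as in this generic sketch (not the exact call sites this commit touched):

package sketch

import "os"

// mkDataDir creates a directory with owner/group access only.
// os.MkdirAll(path, 0o777) would be flagged by G301; 0o750 (or
// stricter) satisfies the rule.
func mkDataDir(path string) error {
	return os.MkdirAll(path, 0o750)
}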
Preston Van Loon
2ee015452c Use go-cmp and protocmp for assertion diff printing (#14978)
Fix assertion tests after switching to cmp

Changelog fragment
2025-02-23 21:06:07 +00:00
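
go-cmp produces field-level diffs, and protocmp.Transform() makes protobuf messages safely comparable; a minimal standalone example of the pattern, using wrapperspb as a stand-in message type:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"google.golang.org/protobuf/testing/protocmp"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	want := wrapperspb.String("foo")
	got := wrapperspb.String("bar")
	// protocmp.Transform lets cmp compare proto messages; the returned
	// diff pinpoints the differing fields instead of dumping both values.
	if d := cmp.Diff(want, got, protocmp.Transform()); d != "" {
		fmt.Printf("mismatch (-want +got):\n%s", d)
	}
}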
terence
ffc1bf8bbe Add more verbosity to validate range message (#14975) 2025-02-21 19:54:40 +00:00
Preston Van Loon
014dbd5c3a Lint: Fix violations of non-constant format string in call (#14974)
* Fix violations of non-constant format string in call

* Changelog fragment
2025-02-21 19:46:19 +00:00
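
The vet check in question fires when a non-constant string is passed as a format argument; the standard fix wraps it in an explicit %s verb:

package sketch

import "fmt"

// wrapErr shows the fix pattern. `return fmt.Errorf(msg)` would trip
// the non-constant-format-string check, since a stray '%' in msg would
// be parsed as a verb; "%s" makes the intent explicit and safe.
func wrapErr(msg string) error {
	return fmt.Errorf("%s", msg)
}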
Preston Van Loon
9bceaa59d2 tracing: Add otel tracer transport to all http clients (#14972)
* Fixed otelhttp client setups.

Note: This may not be the best solution, as the http client is defined in many places. There should be a canonical http client with the proper setup.

* Changelog fragment

* go mod tidy and gazelle
2025-02-21 19:39:53 +00:00
Preston Van Loon
832ebb3f39 Lint: Fix violations of S1009: should omit nil check; len() for nil slices is defined as zero (#14973)
* Fix violations of S1009: should omit nil check; len() for nil slices is defined as zero

* Changelog fragment
2025-02-21 19:39:33 +00:00
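
S1009 targets the redundant guard before len(): since len of a nil slice is defined to be zero, the nil check adds nothing:

package sketch

// hasItems shows the S1009 simplification.
// Before (flagged): if xs != nil && len(xs) > 0 { ... }
// After: the nil check is dropped because len(nil) == 0.
func hasItems(xs []int) bool {
	return len(xs) > 0
}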
james-prysm
8345c271cc Code Cleanup: payload conversions and fixing gocognit ignores (#14953)
* created new conversions_execution files, moving the from- and to-consensus functions for execution-related items; also created a placeholder for block_execution with the intent of adding execution types there

* moving execution types from block.go to block_execution.go

* migrating more types and logic

* adding all the to consensus functions for payloads

* changelog

* linting

* updating unit tests for conversions

* fixing linting

* forgot to fix test

* updating name based on feedback
2025-02-21 16:12:01 +00:00
365 changed files with 5647 additions and 25526 deletions

View File

@@ -22,7 +22,6 @@ coverage --define=coverage_enabled=1
build --workspace_status_command=./hack/workspace_status.sh
build --define blst_disabled=false
build --compilation_mode=opt
run --define blst_disabled=false
build:blst_disabled --define blst_disabled=true

View File

@@ -255,7 +255,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.5.0-alpha.10"
consensus_spec_version = "v1.5.0-beta.2"
bls_test_version = "v0.1.1"
@@ -271,7 +271,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-NtWIhbO/mVMb1edq5jqABL0o8R1tNFiuG8PCMAsUHcs=",
integrity = "sha256-X/bMxbKg1clo2aFEjBoeuFq/U+BF1eQopgRP/7nI3Qg=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -287,7 +287,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-DFlFlnzls1bBrDm+/xD8NK2ivvkhxR+rSNVLLqScVKc=",
integrity = "sha256-WSxdri5OJGuNApW+odKle5UzToDyEOx+F3lMiqamJAg=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -303,7 +303,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-G9ENPF8udZL/BqRHbi60GhFPnZDPZAH6UjcjRiOlvbk=",
integrity = "sha256-LYE8l3y/zSt4YVrehrJ3ralqtgeYNildiIp+HR6+xAI=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -318,7 +318,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-ClOLKkmAcEi8/uKi6LDeqthask5+E3sgxVoA0bqmQ0c=",
integrity = "sha256-jvZQ90qcJMTOqMsPO7sgeEVQmewZTHcz7LVDkNqwTFQ=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -33,6 +33,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
+"@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -25,6 +25,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
log "github.com/sirupsen/logrus"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)
const (
@@ -108,7 +109,7 @@ func NewClient(host string, opts ...ClientOpt) (*Client, error) {
return nil, err
}
c := &Client{
hc: &http.Client{},
hc: &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)},
baseURL: u,
}
for _, o := range opts {
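
For reference, the wrapped transport above is all it takes to trace outgoing requests; a self-contained version of the same pattern:

package sketch

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// newTracedClient mirrors the change above: otelhttp.NewTransport wraps
// an http.RoundTripper so every outgoing request gets a client span and
// trace-context propagation headers.
func newTracedClient() *http.Client {
	return &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}
}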

View File

@@ -4,9 +4,11 @@ go_library(
name = "go_default_library",
srcs = [
"block.go",
"block_execution.go",
"conversions.go",
"conversions_blob.go",
"conversions_block.go",
"conversions_block_execution.go",
"conversions_lightclient.go",
"conversions_state.go",
"endpoints_beacon.go",
@@ -47,10 +49,16 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["conversions_test.go"],
srcs = [
"conversions_block_execution_test.go",
"conversions_test.go",
],
embed = [":go_default_library"],
deps = [
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
],
)

View File

@@ -186,40 +186,6 @@ type BlindedBeaconBlockBodyBellatrix struct {
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
}
type ExecutionPayload struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
}
type ExecutionPayloadHeader struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
}
// ----------------------------------------------------------------------------
// Capella
// ----------------------------------------------------------------------------
@@ -298,42 +264,6 @@ type BlindedBeaconBlockBodyCapella struct {
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
}
type ExecutionPayloadCapella struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
Withdrawals []*Withdrawal `json:"withdrawals"`
}
type ExecutionPayloadHeaderCapella struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
WithdrawalsRoot string `json:"withdrawals_root"`
}
// ----------------------------------------------------------------------------
// Deneb
// ----------------------------------------------------------------------------
@@ -426,46 +356,6 @@ type BlindedBeaconBlockBodyDeneb struct {
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
}
type ExecutionPayloadDeneb struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
Withdrawals []*Withdrawal `json:"withdrawals"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
}
type ExecutionPayloadHeaderDeneb struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
WithdrawalsRoot string `json:"withdrawals_root"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
}
// ----------------------------------------------------------------------------
// Electra
// ----------------------------------------------------------------------------
@@ -560,14 +450,6 @@ type BlindedBeaconBlockBodyElectra struct {
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}
type (
ExecutionRequests struct {
Deposits []*DepositRequest `json:"deposits"`
Withdrawals []*WithdrawalRequest `json:"withdrawals"`
Consolidations []*ConsolidationRequest `json:"consolidations"`
}
)
// ----------------------------------------------------------------------------
// Fulu
// ----------------------------------------------------------------------------

View File

@@ -0,0 +1,157 @@
package structs
// ----------------------------------------------------------------------------
// Bellatrix
// ----------------------------------------------------------------------------
type ExecutionPayload struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
}
type ExecutionPayloadHeader struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
}
// ----------------------------------------------------------------------------
// Capella
// ----------------------------------------------------------------------------
type ExecutionPayloadCapella struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
Withdrawals []*Withdrawal `json:"withdrawals"`
}
type ExecutionPayloadHeaderCapella struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
WithdrawalsRoot string `json:"withdrawals_root"`
}
// ----------------------------------------------------------------------------
// Deneb
// ----------------------------------------------------------------------------
type ExecutionPayloadDeneb struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
Withdrawals []*Withdrawal `json:"withdrawals"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
}
type ExecutionPayloadHeaderDeneb struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
WithdrawalsRoot string `json:"withdrawals_root"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
}
// ----------------------------------------------------------------------------
// Electra
// ----------------------------------------------------------------------------
type ExecutionRequests struct {
Deposits []*DepositRequest `json:"deposits"`
Withdrawals []*WithdrawalRequest `json:"withdrawals"`
Consolidations []*ConsolidationRequest `json:"consolidations"`
}
type DepositRequest struct {
Pubkey string `json:"pubkey"`
WithdrawalCredentials string `json:"withdrawal_credentials"`
Amount string `json:"amount"`
Signature string `json:"signature"`
Index string `json:"index"`
}
type WithdrawalRequest struct {
SourceAddress string `json:"source_address"`
ValidatorPubkey string `json:"validator_pubkey"`
Amount string `json:"amount"`
}
type ConsolidationRequest struct {
SourceAddress string `json:"source_address"`
SourcePubkey string `json:"source_pubkey"`
TargetPubkey string `json:"target_pubkey"`
}
// ----------------------------------------------------------------------------
// Fulu
// ----------------------------------------------------------------------------
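
These API types follow the beacon-API JSON conventions: numbers as decimal strings, byte values as 0x-hex. A quick round-trip of one of them shows the wire shape (the struct is copied here only so the sketch compiles on its own; the field values are made up):

package main

import (
	"encoding/json"
	"fmt"
)

// DepositRequest mirrors the type defined above.
type DepositRequest struct {
	Pubkey                string `json:"pubkey"`
	WithdrawalCredentials string `json:"withdrawal_credentials"`
	Amount                string `json:"amount"`
	Signature             string `json:"signature"`
	Index                 string `json:"index"`
}

func main() {
	d := DepositRequest{Pubkey: "0xabcd", Amount: "32000000000", Index: "0"}
	out, _ := json.Marshal(d)
	// Prints the JSON with string-typed numbers and hex-typed bytes.
	fmt.Println(string(out))
}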

View File

@@ -889,126 +889,6 @@ func WithdrawalFromConsensus(w *enginev1.Withdrawal) *Withdrawal {
}
}
func WithdrawalRequestsFromConsensus(ws []*enginev1.WithdrawalRequest) []*WithdrawalRequest {
result := make([]*WithdrawalRequest, len(ws))
for i, w := range ws {
result[i] = WithdrawalRequestFromConsensus(w)
}
return result
}
func WithdrawalRequestFromConsensus(w *enginev1.WithdrawalRequest) *WithdrawalRequest {
return &WithdrawalRequest{
SourceAddress: hexutil.Encode(w.SourceAddress),
ValidatorPubkey: hexutil.Encode(w.ValidatorPubkey),
Amount: fmt.Sprintf("%d", w.Amount),
}
}
func (w *WithdrawalRequest) ToConsensus() (*enginev1.WithdrawalRequest, error) {
src, err := bytesutil.DecodeHexWithLength(w.SourceAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourceAddress")
}
pubkey, err := bytesutil.DecodeHexWithLength(w.ValidatorPubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "ValidatorPubkey")
}
amount, err := strconv.ParseUint(w.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Amount")
}
return &enginev1.WithdrawalRequest{
SourceAddress: src,
ValidatorPubkey: pubkey,
Amount: amount,
}, nil
}
func ConsolidationRequestsFromConsensus(cs []*enginev1.ConsolidationRequest) []*ConsolidationRequest {
result := make([]*ConsolidationRequest, len(cs))
for i, c := range cs {
result[i] = ConsolidationRequestFromConsensus(c)
}
return result
}
func ConsolidationRequestFromConsensus(c *enginev1.ConsolidationRequest) *ConsolidationRequest {
return &ConsolidationRequest{
SourceAddress: hexutil.Encode(c.SourceAddress),
SourcePubkey: hexutil.Encode(c.SourcePubkey),
TargetPubkey: hexutil.Encode(c.TargetPubkey),
}
}
func (c *ConsolidationRequest) ToConsensus() (*enginev1.ConsolidationRequest, error) {
srcAddress, err := bytesutil.DecodeHexWithLength(c.SourceAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourceAddress")
}
srcPubkey, err := bytesutil.DecodeHexWithLength(c.SourcePubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourcePubkey")
}
targetPubkey, err := bytesutil.DecodeHexWithLength(c.TargetPubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "TargetPubkey")
}
return &enginev1.ConsolidationRequest{
SourceAddress: srcAddress,
SourcePubkey: srcPubkey,
TargetPubkey: targetPubkey,
}, nil
}
func DepositRequestsFromConsensus(ds []*enginev1.DepositRequest) []*DepositRequest {
result := make([]*DepositRequest, len(ds))
for i, d := range ds {
result[i] = DepositRequestFromConsensus(d)
}
return result
}
func DepositRequestFromConsensus(d *enginev1.DepositRequest) *DepositRequest {
return &DepositRequest{
Pubkey: hexutil.Encode(d.Pubkey),
WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
Amount: fmt.Sprintf("%d", d.Amount),
Signature: hexutil.Encode(d.Signature),
Index: fmt.Sprintf("%d", d.Index),
}
}
func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
pubkey, err := bytesutil.DecodeHexWithLength(d.Pubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "Pubkey")
}
withdrawalCredentials, err := bytesutil.DecodeHexWithLength(d.WithdrawalCredentials, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "WithdrawalCredentials")
}
amount, err := strconv.ParseUint(d.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Amount")
}
sig, err := bytesutil.DecodeHexWithLength(d.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
index, err := strconv.ParseUint(d.Index, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Index")
}
return &enginev1.DepositRequest{
Pubkey: pubkey,
WithdrawalCredentials: withdrawalCredentials,
Amount: amount,
Signature: sig,
Index: index,
}, nil
}
func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlashing, error) {
if src == nil {
return nil, server.NewDecodeError(errNilValue, "ProposerSlashings")

File diff suppressed because it is too large

View File

@@ -0,0 +1,973 @@
package structs
import (
"fmt"
"strconv"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v5/api/server"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/slice"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)
// ----------------------------------------------------------------------------
// Bellatrix
// ----------------------------------------------------------------------------
func ExecutionPayloadFromConsensus(payload *enginev1.ExecutionPayload) (*ExecutionPayload, error) {
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
if err != nil {
return nil, err
}
transactions := make([]string, len(payload.Transactions))
for i, tx := range payload.Transactions {
transactions[i] = hexutil.Encode(tx)
}
return &ExecutionPayload{
ParentHash: hexutil.Encode(payload.ParentHash),
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
StateRoot: hexutil.Encode(payload.StateRoot),
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
LogsBloom: hexutil.Encode(payload.LogsBloom),
PrevRandao: hexutil.Encode(payload.PrevRandao),
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
ExtraData: hexutil.Encode(payload.ExtraData),
BaseFeePerGas: baseFeePerGas,
BlockHash: hexutil.Encode(payload.BlockHash),
Transactions: transactions,
}, nil
}
func (e *ExecutionPayload) ToConsensus() (*enginev1.ExecutionPayload, error) {
if e == nil {
return nil, server.NewDecodeError(errNilValue, "ExecutionPayload")
}
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.ParentHash")
}
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.FeeRecipient")
}
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.StateRoot")
}
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.ReceiptsRoot")
}
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.LogsBloom")
}
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.PrevRandao")
}
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockNumber")
}
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.GasLimit")
}
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.GasUsed")
}
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.Timestamp")
}
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.ExtraData")
}
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.BaseFeePerGas")
}
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockHash")
}
err = slice.VerifyMaxLength(e.Transactions, fieldparams.MaxTxsPerPayloadLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.Transactions")
}
payloadTxs := make([][]byte, len(e.Transactions))
for i, tx := range e.Transactions {
payloadTxs[i], err = bytesutil.DecodeHexWithMaxLength(tx, fieldparams.MaxBytesPerTxLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Transactions[%d]", i))
}
}
return &enginev1.ExecutionPayload{
ParentHash: payloadParentHash,
FeeRecipient: payloadFeeRecipient,
StateRoot: payloadStateRoot,
ReceiptsRoot: payloadReceiptsRoot,
LogsBloom: payloadLogsBloom,
PrevRandao: payloadPrevRandao,
BlockNumber: payloadBlockNumber,
GasLimit: payloadGasLimit,
GasUsed: payloadGasUsed,
Timestamp: payloadTimestamp,
ExtraData: payloadExtraData,
BaseFeePerGas: payloadBaseFeePerGas,
BlockHash: payloadBlockHash,
Transactions: payloadTxs,
}, nil
}
func ExecutionPayloadHeaderFromConsensus(payload *enginev1.ExecutionPayloadHeader) (*ExecutionPayloadHeader, error) {
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
if err != nil {
return nil, err
}
return &ExecutionPayloadHeader{
ParentHash: hexutil.Encode(payload.ParentHash),
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
StateRoot: hexutil.Encode(payload.StateRoot),
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
LogsBloom: hexutil.Encode(payload.LogsBloom),
PrevRandao: hexutil.Encode(payload.PrevRandao),
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
ExtraData: hexutil.Encode(payload.ExtraData),
BaseFeePerGas: baseFeePerGas,
BlockHash: hexutil.Encode(payload.BlockHash),
TransactionsRoot: hexutil.Encode(payload.TransactionsRoot),
}, nil
}
func (e *ExecutionPayloadHeader) ToConsensus() (*enginev1.ExecutionPayloadHeader, error) {
if e == nil {
return nil, server.NewDecodeError(errNilValue, "ExecutionPayloadHeader")
}
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ParentHash")
}
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.FeeRecipient")
}
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.StateRoot")
}
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ReceiptsRoot")
}
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.LogsBloom")
}
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.PrevRandao")
}
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockNumber")
}
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasLimit")
}
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasUsed")
}
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.Timestamp")
}
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ExtraData")
}
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BaseFeePerGas")
}
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockHash")
}
payloadTxsRoot, err := bytesutil.DecodeHexWithLength(e.TransactionsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.TransactionsRoot")
}
return &enginev1.ExecutionPayloadHeader{
ParentHash: payloadParentHash,
FeeRecipient: payloadFeeRecipient,
StateRoot: payloadStateRoot,
ReceiptsRoot: payloadReceiptsRoot,
LogsBloom: payloadLogsBloom,
PrevRandao: payloadPrevRandao,
BlockNumber: payloadBlockNumber,
GasLimit: payloadGasLimit,
GasUsed: payloadGasUsed,
Timestamp: payloadTimestamp,
ExtraData: payloadExtraData,
BaseFeePerGas: payloadBaseFeePerGas,
BlockHash: payloadBlockHash,
TransactionsRoot: payloadTxsRoot,
}, nil
}
// ----------------------------------------------------------------------------
// Capella
// ----------------------------------------------------------------------------
func ExecutionPayloadCapellaFromConsensus(payload *enginev1.ExecutionPayloadCapella) (*ExecutionPayloadCapella, error) {
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
if err != nil {
return nil, err
}
transactions := make([]string, len(payload.Transactions))
for i, tx := range payload.Transactions {
transactions[i] = hexutil.Encode(tx)
}
return &ExecutionPayloadCapella{
ParentHash: hexutil.Encode(payload.ParentHash),
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
StateRoot: hexutil.Encode(payload.StateRoot),
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
LogsBloom: hexutil.Encode(payload.LogsBloom),
PrevRandao: hexutil.Encode(payload.PrevRandao),
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
ExtraData: hexutil.Encode(payload.ExtraData),
BaseFeePerGas: baseFeePerGas,
BlockHash: hexutil.Encode(payload.BlockHash),
Transactions: transactions,
Withdrawals: WithdrawalsFromConsensus(payload.Withdrawals),
}, nil
}
func (e *ExecutionPayloadCapella) ToConsensus() (*enginev1.ExecutionPayloadCapella, error) {
if e == nil {
return nil, server.NewDecodeError(errNilValue, "ExecutionPayload")
}
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.ParentHash")
}
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.FeeRecipient")
}
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.StateRoot")
}
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.ReceiptsRoot")
}
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.LogsBloom")
}
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.PrevRandao")
}
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockNumber")
}
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.GasLimit")
}
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.GasUsed")
}
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.Timestamp")
}
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.ExtraData")
}
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.BaseFeePerGas")
}
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockHash")
}
err = slice.VerifyMaxLength(e.Transactions, fieldparams.MaxTxsPerPayloadLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.Transactions")
}
payloadTxs := make([][]byte, len(e.Transactions))
for i, tx := range e.Transactions {
payloadTxs[i], err = bytesutil.DecodeHexWithMaxLength(tx, fieldparams.MaxBytesPerTxLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Transactions[%d]", i))
}
}
err = slice.VerifyMaxLength(e.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.Withdrawals")
}
withdrawals := make([]*enginev1.Withdrawal, len(e.Withdrawals))
for i, w := range e.Withdrawals {
withdrawalIndex, err := strconv.ParseUint(w.WithdrawalIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].WithdrawalIndex", i))
}
validatorIndex, err := strconv.ParseUint(w.ValidatorIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].ValidatorIndex", i))
}
address, err := bytesutil.DecodeHexWithLength(w.ExecutionAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].ExecutionAddress", i))
}
amount, err := strconv.ParseUint(w.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].Amount", i))
}
withdrawals[i] = &enginev1.Withdrawal{
Index: withdrawalIndex,
ValidatorIndex: primitives.ValidatorIndex(validatorIndex),
Address: address,
Amount: amount,
}
}
return &enginev1.ExecutionPayloadCapella{
ParentHash: payloadParentHash,
FeeRecipient: payloadFeeRecipient,
StateRoot: payloadStateRoot,
ReceiptsRoot: payloadReceiptsRoot,
LogsBloom: payloadLogsBloom,
PrevRandao: payloadPrevRandao,
BlockNumber: payloadBlockNumber,
GasLimit: payloadGasLimit,
GasUsed: payloadGasUsed,
Timestamp: payloadTimestamp,
ExtraData: payloadExtraData,
BaseFeePerGas: payloadBaseFeePerGas,
BlockHash: payloadBlockHash,
Transactions: payloadTxs,
Withdrawals: withdrawals,
}, nil
}
func ExecutionPayloadHeaderCapellaFromConsensus(payload *enginev1.ExecutionPayloadHeaderCapella) (*ExecutionPayloadHeaderCapella, error) {
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
if err != nil {
return nil, err
}
return &ExecutionPayloadHeaderCapella{
ParentHash: hexutil.Encode(payload.ParentHash),
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
StateRoot: hexutil.Encode(payload.StateRoot),
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
LogsBloom: hexutil.Encode(payload.LogsBloom),
PrevRandao: hexutil.Encode(payload.PrevRandao),
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
ExtraData: hexutil.Encode(payload.ExtraData),
BaseFeePerGas: baseFeePerGas,
BlockHash: hexutil.Encode(payload.BlockHash),
TransactionsRoot: hexutil.Encode(payload.TransactionsRoot),
WithdrawalsRoot: hexutil.Encode(payload.WithdrawalsRoot),
}, nil
}
func (e *ExecutionPayloadHeaderCapella) ToConsensus() (*enginev1.ExecutionPayloadHeaderCapella, error) {
if e == nil {
return nil, server.NewDecodeError(errNilValue, "ExecutionPayloadHeader")
}
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ParentHash")
}
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.FeeRecipient")
}
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.StateRoot")
}
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ReceiptsRoot")
}
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.LogsBloom")
}
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.PrevRandao")
}
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockNumber")
}
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasLimit")
}
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasUsed")
}
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.Timestamp")
}
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ExtraData")
}
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BaseFeePerGas")
}
payloadBlockHash, err := bytesutil.DecodeHexWithMaxLength(e.BlockHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockHash")
}
payloadTxsRoot, err := bytesutil.DecodeHexWithMaxLength(e.TransactionsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.TransactionsRoot")
}
payloadWithdrawalsRoot, err := bytesutil.DecodeHexWithMaxLength(e.WithdrawalsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.WithdrawalsRoot")
}
return &enginev1.ExecutionPayloadHeaderCapella{
ParentHash: payloadParentHash,
FeeRecipient: payloadFeeRecipient,
StateRoot: payloadStateRoot,
ReceiptsRoot: payloadReceiptsRoot,
LogsBloom: payloadLogsBloom,
PrevRandao: payloadPrevRandao,
BlockNumber: payloadBlockNumber,
GasLimit: payloadGasLimit,
GasUsed: payloadGasUsed,
Timestamp: payloadTimestamp,
ExtraData: payloadExtraData,
BaseFeePerGas: payloadBaseFeePerGas,
BlockHash: payloadBlockHash,
TransactionsRoot: payloadTxsRoot,
WithdrawalsRoot: payloadWithdrawalsRoot,
}, nil
}
// ----------------------------------------------------------------------------
// Deneb
// ----------------------------------------------------------------------------
func ExecutionPayloadDenebFromConsensus(payload *enginev1.ExecutionPayloadDeneb) (*ExecutionPayloadDeneb, error) {
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
if err != nil {
return nil, err
}
transactions := make([]string, len(payload.Transactions))
for i, tx := range payload.Transactions {
transactions[i] = hexutil.Encode(tx)
}
return &ExecutionPayloadDeneb{
ParentHash: hexutil.Encode(payload.ParentHash),
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
StateRoot: hexutil.Encode(payload.StateRoot),
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
LogsBloom: hexutil.Encode(payload.LogsBloom),
PrevRandao: hexutil.Encode(payload.PrevRandao),
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
ExtraData: hexutil.Encode(payload.ExtraData),
BaseFeePerGas: baseFeePerGas,
BlockHash: hexutil.Encode(payload.BlockHash),
Transactions: transactions,
Withdrawals: WithdrawalsFromConsensus(payload.Withdrawals),
BlobGasUsed: fmt.Sprintf("%d", payload.BlobGasUsed),
ExcessBlobGas: fmt.Sprintf("%d", payload.ExcessBlobGas),
}, nil
}
func (e *ExecutionPayloadDeneb) ToConsensus() (*enginev1.ExecutionPayloadDeneb, error) {
if e == nil {
return nil, server.NewDecodeError(errNilValue, "ExecutionPayload")
}
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.ParentHash")
}
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.FeeRecipient")
}
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.StateRoot")
}
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.ReceiptsRoot")
}
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.LogsBloom")
}
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.PrevRandao")
}
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockNumber")
}
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.GasLimit")
}
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.GasUsed")
}
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.Timestamp")
}
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.ExtraData")
}
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.BaseFeePerGas")
}
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockHash")
}
err = slice.VerifyMaxLength(e.Transactions, fieldparams.MaxTxsPerPayloadLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.Transactions")
}
txs := make([][]byte, len(e.Transactions))
for i, tx := range e.Transactions {
txs[i], err = bytesutil.DecodeHexWithMaxLength(tx, fieldparams.MaxBytesPerTxLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Transactions[%d]", i))
}
}
err = slice.VerifyMaxLength(e.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.Withdrawals")
}
withdrawals := make([]*enginev1.Withdrawal, len(e.Withdrawals))
for i, w := range e.Withdrawals {
withdrawalIndex, err := strconv.ParseUint(w.WithdrawalIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].WithdrawalIndex", i))
}
validatorIndex, err := strconv.ParseUint(w.ValidatorIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].ValidatorIndex", i))
}
address, err := bytesutil.DecodeHexWithLength(w.ExecutionAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].ExecutionAddress", i))
}
amount, err := strconv.ParseUint(w.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].Amount", i))
}
withdrawals[i] = &enginev1.Withdrawal{
Index: withdrawalIndex,
ValidatorIndex: primitives.ValidatorIndex(validatorIndex),
Address: address,
Amount: amount,
}
}
payloadBlobGasUsed, err := strconv.ParseUint(e.BlobGasUsed, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.BlobGasUsed")
}
payloadExcessBlobGas, err := strconv.ParseUint(e.ExcessBlobGas, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.ExcessBlobGas")
}
return &enginev1.ExecutionPayloadDeneb{
ParentHash: payloadParentHash,
FeeRecipient: payloadFeeRecipient,
StateRoot: payloadStateRoot,
ReceiptsRoot: payloadReceiptsRoot,
LogsBloom: payloadLogsBloom,
PrevRandao: payloadPrevRandao,
BlockNumber: payloadBlockNumber,
GasLimit: payloadGasLimit,
GasUsed: payloadGasUsed,
Timestamp: payloadTimestamp,
ExtraData: payloadExtraData,
BaseFeePerGas: payloadBaseFeePerGas,
BlockHash: payloadBlockHash,
Transactions: txs,
Withdrawals: withdrawals,
BlobGasUsed: payloadBlobGasUsed,
ExcessBlobGas: payloadExcessBlobGas,
}, nil
}
func ExecutionPayloadHeaderDenebFromConsensus(payload *enginev1.ExecutionPayloadHeaderDeneb) (*ExecutionPayloadHeaderDeneb, error) {
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
if err != nil {
return nil, err
}
return &ExecutionPayloadHeaderDeneb{
ParentHash: hexutil.Encode(payload.ParentHash),
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
StateRoot: hexutil.Encode(payload.StateRoot),
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
LogsBloom: hexutil.Encode(payload.LogsBloom),
PrevRandao: hexutil.Encode(payload.PrevRandao),
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
ExtraData: hexutil.Encode(payload.ExtraData),
BaseFeePerGas: baseFeePerGas,
BlockHash: hexutil.Encode(payload.BlockHash),
TransactionsRoot: hexutil.Encode(payload.TransactionsRoot),
WithdrawalsRoot: hexutil.Encode(payload.WithdrawalsRoot),
BlobGasUsed: fmt.Sprintf("%d", payload.BlobGasUsed),
ExcessBlobGas: fmt.Sprintf("%d", payload.ExcessBlobGas),
}, nil
}
func (e *ExecutionPayloadHeaderDeneb) ToConsensus() (*enginev1.ExecutionPayloadHeaderDeneb, error) {
if e == nil {
return nil, server.NewDecodeError(errNilValue, "ExecutionPayloadHeader")
}
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ParentHash")
}
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.FeeRecipient")
}
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.StateRoot")
}
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ReceiptsRoot")
}
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.LogsBloom")
}
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.PrevRandao")
}
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockNumber")
}
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasLimit")
}
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasUsed")
}
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.Timestamp")
}
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ExtraData")
}
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BaseFeePerGas")
}
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockHash")
}
payloadTxsRoot, err := bytesutil.DecodeHexWithLength(e.TransactionsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.TransactionsRoot")
}
payloadWithdrawalsRoot, err := bytesutil.DecodeHexWithLength(e.WithdrawalsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.WithdrawalsRoot")
}
payloadBlobGasUsed, err := strconv.ParseUint(e.BlobGasUsed, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.BlobGasUsed")
}
payloadExcessBlobGas, err := strconv.ParseUint(e.ExcessBlobGas, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayload.ExcessBlobGas")
}
return &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payloadParentHash,
FeeRecipient: payloadFeeRecipient,
StateRoot: payloadStateRoot,
ReceiptsRoot: payloadReceiptsRoot,
LogsBloom: payloadLogsBloom,
PrevRandao: payloadPrevRandao,
BlockNumber: payloadBlockNumber,
GasLimit: payloadGasLimit,
GasUsed: payloadGasUsed,
Timestamp: payloadTimestamp,
ExtraData: payloadExtraData,
BaseFeePerGas: payloadBaseFeePerGas,
BlockHash: payloadBlockHash,
TransactionsRoot: payloadTxsRoot,
WithdrawalsRoot: payloadWithdrawalsRoot,
BlobGasUsed: payloadBlobGasUsed,
ExcessBlobGas: payloadExcessBlobGas,
}, nil
}
// ----------------------------------------------------------------------------
// Electra
// ----------------------------------------------------------------------------
var (
ExecutionPayloadElectraFromConsensus = ExecutionPayloadDenebFromConsensus
ExecutionPayloadHeaderElectraFromConsensus = ExecutionPayloadHeaderDenebFromConsensus
)
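// A minimal sketch (hypothetical helper, not from this diff): because the
// Electra converters above are plain function values aliasing the Deneb ones,
// an Electra payload converts exactly like a Deneb payload.
func exampleElectraConversion(p *enginev1.ExecutionPayloadDeneb) (*ExecutionPayloadDeneb, error) {
	// ExecutionPayloadElectraFromConsensus == ExecutionPayloadDenebFromConsensus.
	return ExecutionPayloadElectraFromConsensus(p)
}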
func WithdrawalRequestsFromConsensus(ws []*enginev1.WithdrawalRequest) []*WithdrawalRequest {
result := make([]*WithdrawalRequest, len(ws))
for i, w := range ws {
result[i] = WithdrawalRequestFromConsensus(w)
}
return result
}
func WithdrawalRequestFromConsensus(w *enginev1.WithdrawalRequest) *WithdrawalRequest {
return &WithdrawalRequest{
SourceAddress: hexutil.Encode(w.SourceAddress),
ValidatorPubkey: hexutil.Encode(w.ValidatorPubkey),
Amount: fmt.Sprintf("%d", w.Amount),
}
}
func (w *WithdrawalRequest) ToConsensus() (*enginev1.WithdrawalRequest, error) {
if w == nil {
return nil, server.NewDecodeError(errNilValue, "WithdrawalRequest")
}
src, err := bytesutil.DecodeHexWithLength(w.SourceAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourceAddress")
}
pubkey, err := bytesutil.DecodeHexWithLength(w.ValidatorPubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "ValidatorPubkey")
}
amount, err := strconv.ParseUint(w.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Amount")
}
return &enginev1.WithdrawalRequest{
SourceAddress: src,
ValidatorPubkey: pubkey,
Amount: amount,
}, nil
}
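// A minimal round-trip sketch (hypothetical helper): for well-formed inputs,
// WithdrawalRequestFromConsensus and ToConsensus should be inverses.
func exampleWithdrawalRequestRoundTrip(w *enginev1.WithdrawalRequest) error {
	decoded, err := WithdrawalRequestFromConsensus(w).ToConsensus()
	if err != nil {
		return err
	}
	if decoded.Amount != w.Amount {
		return fmt.Errorf("round trip mismatch: got %d, want %d", decoded.Amount, w.Amount)
	}
	return nil
}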
func ConsolidationRequestsFromConsensus(cs []*enginev1.ConsolidationRequest) []*ConsolidationRequest {
result := make([]*ConsolidationRequest, len(cs))
for i, c := range cs {
result[i] = ConsolidationRequestFromConsensus(c)
}
return result
}
func ConsolidationRequestFromConsensus(c *enginev1.ConsolidationRequest) *ConsolidationRequest {
return &ConsolidationRequest{
SourceAddress: hexutil.Encode(c.SourceAddress),
SourcePubkey: hexutil.Encode(c.SourcePubkey),
TargetPubkey: hexutil.Encode(c.TargetPubkey),
}
}
func (c *ConsolidationRequest) ToConsensus() (*enginev1.ConsolidationRequest, error) {
if c == nil {
return nil, server.NewDecodeError(errNilValue, "ConsolidationRequest")
}
srcAddress, err := bytesutil.DecodeHexWithLength(c.SourceAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourceAddress")
}
srcPubkey, err := bytesutil.DecodeHexWithLength(c.SourcePubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourcePubkey")
}
targetPubkey, err := bytesutil.DecodeHexWithLength(c.TargetPubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "TargetPubkey")
}
return &enginev1.ConsolidationRequest{
SourceAddress: srcAddress,
SourcePubkey: srcPubkey,
TargetPubkey: targetPubkey,
}, nil
}
func DepositRequestsFromConsensus(ds []*enginev1.DepositRequest) []*DepositRequest {
result := make([]*DepositRequest, len(ds))
for i, d := range ds {
result[i] = DepositRequestFromConsensus(d)
}
return result
}
func DepositRequestFromConsensus(d *enginev1.DepositRequest) *DepositRequest {
return &DepositRequest{
Pubkey: hexutil.Encode(d.Pubkey),
WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
Amount: fmt.Sprintf("%d", d.Amount),
Signature: hexutil.Encode(d.Signature),
Index: fmt.Sprintf("%d", d.Index),
}
}
func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
if d == nil {
return nil, server.NewDecodeError(errNilValue, "DepositRequest")
}
pubkey, err := bytesutil.DecodeHexWithLength(d.Pubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "Pubkey")
}
withdrawalCredentials, err := bytesutil.DecodeHexWithLength(d.WithdrawalCredentials, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "WithdrawalCredentials")
}
amount, err := strconv.ParseUint(d.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Amount")
}
sig, err := bytesutil.DecodeHexWithLength(d.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
index, err := strconv.ParseUint(d.Index, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Index")
}
return &enginev1.DepositRequest{
Pubkey: pubkey,
WithdrawalCredentials: withdrawalCredentials,
Amount: amount,
Signature: sig,
Index: index,
}, nil
}
func ExecutionRequestsFromConsensus(er *enginev1.ExecutionRequests) *ExecutionRequests {
return &ExecutionRequests{
Deposits: DepositRequestsFromConsensus(er.Deposits),
Withdrawals: WithdrawalRequestsFromConsensus(er.Withdrawals),
Consolidations: ConsolidationRequestsFromConsensus(er.Consolidations),
}
}
func (e *ExecutionRequests) ToConsensus() (*enginev1.ExecutionRequests, error) {
if e == nil {
return nil, server.NewDecodeError(errNilValue, "ExecutionRequests")
}
var err error
if err = slice.VerifyMaxLength(e.Deposits, params.BeaconConfig().MaxDepositRequestsPerPayload); err != nil {
return nil, err
}
depositRequests := make([]*enginev1.DepositRequest, len(e.Deposits))
for i, d := range e.Deposits {
depositRequests[i], err = d.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionRequests.Deposits[%d]", i))
}
}
if err = slice.VerifyMaxLength(e.Withdrawals, params.BeaconConfig().MaxWithdrawalRequestsPerPayload); err != nil {
return nil, err
}
withdrawalRequests := make([]*enginev1.WithdrawalRequest, len(e.Withdrawals))
for i, w := range e.Withdrawals {
withdrawalRequests[i], err = w.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionRequests.Withdrawals[%d]", i))
}
}
if err = slice.VerifyMaxLength(e.Consolidations, params.BeaconConfig().MaxConsolidationsRequestsPerPayload); err != nil {
return nil, err
}
consolidationRequests := make([]*enginev1.ConsolidationRequest, len(e.Consolidations))
for i, c := range e.Consolidations {
consolidationRequests[i], err = c.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionRequests.Consolidations[%d]", i))
}
}
return &enginev1.ExecutionRequests{
Deposits: depositRequests,
Withdrawals: withdrawalRequests,
Consolidations: consolidationRequests,
}, nil
}
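// A sketch of the validation path above (hypothetical helper, assuming the
// configured maximum is in effect): slice.VerifyMaxLength rejects an oversized
// request list before any element is decoded, so the nil entries are never touched.
func exampleTooManyDeposits() error {
	maxDeposits := params.BeaconConfig().MaxDepositRequestsPerPayload
	oversized := &ExecutionRequests{Deposits: make([]*DepositRequest, maxDeposits+1)}
	_, err := oversized.ToConsensus()
	return err // expected to be non-nil
}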
// ----------------------------------------------------------------------------
// Fulu
// ----------------------------------------------------------------------------
var (
ExecutionPayloadFuluFromConsensus = ExecutionPayloadDenebFromConsensus
ExecutionPayloadHeaderFuluFromConsensus = ExecutionPayloadHeaderDenebFromConsensus
BeaconBlockFuluFromConsensus = BeaconBlockElectraFromConsensus
)


@@ -0,0 +1,563 @@
package structs
import (
"fmt"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func fillByteSlice(sliceLength int, value byte) []byte {
bytes := make([]byte, sliceLength)
for index := range bytes {
bytes[index] = value
}
return bytes
}
// TestExecutionPayloadFromConsensus_HappyPath checks the
// ExecutionPayloadFromConsensus function under normal conditions.
func TestExecutionPayloadFromConsensus_HappyPath(t *testing.T) {
consensusPayload := &enginev1.ExecutionPayload{
ParentHash: fillByteSlice(common.HashLength, 0xaa),
FeeRecipient: fillByteSlice(20, 0xbb),
StateRoot: fillByteSlice(32, 0xcc),
ReceiptsRoot: fillByteSlice(32, 0xdd),
LogsBloom: fillByteSlice(256, 0xee),
PrevRandao: fillByteSlice(32, 0xff),
BlockNumber: 12345,
GasLimit: 15000000,
GasUsed: 8000000,
Timestamp: 1680000000,
ExtraData: fillByteSlice(8, 0x11),
BaseFeePerGas: fillByteSlice(32, 0x01),
BlockHash: fillByteSlice(common.HashLength, 0x22),
Transactions: [][]byte{
fillByteSlice(10, 0x33),
fillByteSlice(10, 0x44),
},
}
result, err := ExecutionPayloadFromConsensus(consensusPayload)
require.NoError(t, err)
require.NotNil(t, result)
require.Equal(t, hexutil.Encode(consensusPayload.ParentHash), result.ParentHash)
require.Equal(t, hexutil.Encode(consensusPayload.FeeRecipient), result.FeeRecipient)
require.Equal(t, hexutil.Encode(consensusPayload.StateRoot), result.StateRoot)
require.Equal(t, hexutil.Encode(consensusPayload.ReceiptsRoot), result.ReceiptsRoot)
require.Equal(t, fmt.Sprintf("%d", consensusPayload.BlockNumber), result.BlockNumber)
}
// TestExecutionPayload_ToConsensus_HappyPath checks the
// (*ExecutionPayload).ToConsensus function under normal conditions.
func TestExecutionPayload_ToConsensus_HappyPath(t *testing.T) {
payload := &ExecutionPayload{
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
BlockNumber: "12345",
GasLimit: "15000000",
GasUsed: "8000000",
Timestamp: "1680000000",
ExtraData: "0x11111111",
BaseFeePerGas: "1234",
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x22)),
Transactions: []string{
hexutil.Encode(fillByteSlice(10, 0x33)),
hexutil.Encode(fillByteSlice(10, 0x44)),
},
}
result, err := payload.ToConsensus()
require.NoError(t, err)
require.DeepEqual(t, result.ParentHash, fillByteSlice(common.HashLength, 0xaa))
require.DeepEqual(t, result.FeeRecipient, fillByteSlice(20, 0xbb))
require.DeepEqual(t, result.StateRoot, fillByteSlice(32, 0xcc))
}
// TestExecutionPayloadHeaderFromConsensus_HappyPath checks the
// ExecutionPayloadHeaderFromConsensus function under normal conditions.
func TestExecutionPayloadHeaderFromConsensus_HappyPath(t *testing.T) {
consensusHeader := &enginev1.ExecutionPayloadHeader{
ParentHash: fillByteSlice(common.HashLength, 0xaa),
FeeRecipient: fillByteSlice(20, 0xbb),
StateRoot: fillByteSlice(32, 0xcc),
ReceiptsRoot: fillByteSlice(32, 0xdd),
LogsBloom: fillByteSlice(256, 0xee),
PrevRandao: fillByteSlice(32, 0xff),
BlockNumber: 9999,
GasLimit: 5000000,
GasUsed: 2500000,
Timestamp: 1111111111,
ExtraData: fillByteSlice(4, 0x12),
BaseFeePerGas: fillByteSlice(32, 0x34),
BlockHash: fillByteSlice(common.HashLength, 0x56),
TransactionsRoot: fillByteSlice(32, 0x78),
}
result, err := ExecutionPayloadHeaderFromConsensus(consensusHeader)
require.NoError(t, err)
require.NotNil(t, result)
require.Equal(t, hexutil.Encode(consensusHeader.ParentHash), result.ParentHash)
require.Equal(t, fmt.Sprintf("%d", consensusHeader.BlockNumber), result.BlockNumber)
}
// TestExecutionPayloadHeader_ToConsensus_HappyPath checks the
// (*ExecutionPayloadHeader).ToConsensus function under normal conditions.
func TestExecutionPayloadHeader_ToConsensus_HappyPath(t *testing.T) {
header := &ExecutionPayloadHeader{
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
BlockNumber: "9999",
GasLimit: "5000000",
GasUsed: "2500000",
Timestamp: "1111111111",
ExtraData: "0x1234abcd",
BaseFeePerGas: "1234",
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x56)),
TransactionsRoot: hexutil.Encode(fillByteSlice(32, 0x78)),
}
result, err := header.ToConsensus()
require.NoError(t, err)
require.DeepEqual(t, hexutil.Encode(result.ParentHash), header.ParentHash)
require.DeepEqual(t, hexutil.Encode(result.FeeRecipient), header.FeeRecipient)
require.DeepEqual(t, hexutil.Encode(result.StateRoot), header.StateRoot)
}
// TestExecutionPayloadCapellaFromConsensus_HappyPath checks the
// ExecutionPayloadCapellaFromConsensus function under normal conditions.
func TestExecutionPayloadCapellaFromConsensus_HappyPath(t *testing.T) {
capellaPayload := &enginev1.ExecutionPayloadCapella{
ParentHash: fillByteSlice(common.HashLength, 0xaa),
FeeRecipient: fillByteSlice(20, 0xbb),
StateRoot: fillByteSlice(32, 0xcc),
ReceiptsRoot: fillByteSlice(32, 0xdd),
LogsBloom: fillByteSlice(256, 0xee),
PrevRandao: fillByteSlice(32, 0xff),
BlockNumber: 123,
GasLimit: 9876543,
GasUsed: 1234567,
Timestamp: 5555555,
ExtraData: fillByteSlice(6, 0x11),
BaseFeePerGas: fillByteSlice(32, 0x22),
BlockHash: fillByteSlice(common.HashLength, 0x33),
Transactions: [][]byte{
fillByteSlice(5, 0x44),
},
Withdrawals: []*enginev1.Withdrawal{
{
Index: 1,
ValidatorIndex: 2,
Address: fillByteSlice(20, 0xaa),
Amount: 100,
},
},
}
result, err := ExecutionPayloadCapellaFromConsensus(capellaPayload)
require.NoError(t, err)
require.NotNil(t, result)
require.Equal(t, hexutil.Encode(capellaPayload.ParentHash), result.ParentHash)
require.Equal(t, len(capellaPayload.Transactions), len(result.Transactions))
require.Equal(t, len(capellaPayload.Withdrawals), len(result.Withdrawals))
}
// TestExecutionPayloadCapella_ToConsensus_HappyPath checks the
// (*ExecutionPayloadCapella).ToConsensus function under normal conditions.
func TestExecutionPayloadCapella_ToConsensus_HappyPath(t *testing.T) {
capella := &ExecutionPayloadCapella{
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
BlockNumber: "123",
GasLimit: "9876543",
GasUsed: "1234567",
Timestamp: "5555555",
ExtraData: hexutil.Encode(fillByteSlice(6, 0x11)),
BaseFeePerGas: "1234",
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x33)),
Transactions: []string{
hexutil.Encode(fillByteSlice(5, 0x44)),
},
Withdrawals: []*Withdrawal{
{
WithdrawalIndex: "1",
ValidatorIndex: "2",
ExecutionAddress: hexutil.Encode(fillByteSlice(20, 0xaa)),
Amount: "100",
},
},
}
result, err := capella.ToConsensus()
require.NoError(t, err)
require.DeepEqual(t, hexutil.Encode(result.ParentHash), capella.ParentHash)
require.DeepEqual(t, hexutil.Encode(result.FeeRecipient), capella.FeeRecipient)
require.DeepEqual(t, hexutil.Encode(result.StateRoot), capella.StateRoot)
}
// TestExecutionPayloadDenebFromConsensus_HappyPath checks the
// ExecutionPayloadDenebFromConsensus function under normal conditions.
func TestExecutionPayloadDenebFromConsensus_HappyPath(t *testing.T) {
denebPayload := &enginev1.ExecutionPayloadDeneb{
ParentHash: fillByteSlice(common.HashLength, 0xaa),
FeeRecipient: fillByteSlice(20, 0xbb),
StateRoot: fillByteSlice(32, 0xcc),
ReceiptsRoot: fillByteSlice(32, 0xdd),
LogsBloom: fillByteSlice(256, 0xee),
PrevRandao: fillByteSlice(32, 0xff),
BlockNumber: 999,
GasLimit: 2222222,
GasUsed: 1111111,
Timestamp: 666666,
ExtraData: fillByteSlice(6, 0x11),
BaseFeePerGas: fillByteSlice(32, 0x22),
BlockHash: fillByteSlice(common.HashLength, 0x33),
Transactions: [][]byte{
fillByteSlice(5, 0x44),
},
Withdrawals: []*enginev1.Withdrawal{
{
Index: 1,
ValidatorIndex: 2,
Address: fillByteSlice(20, 0xaa),
Amount: 100,
},
},
BlobGasUsed: 1234,
ExcessBlobGas: 5678,
}
result, err := ExecutionPayloadDenebFromConsensus(denebPayload)
require.NoError(t, err)
require.Equal(t, hexutil.Encode(denebPayload.ParentHash), result.ParentHash)
require.Equal(t, len(denebPayload.Transactions), len(result.Transactions))
require.Equal(t, len(denebPayload.Withdrawals), len(result.Withdrawals))
require.Equal(t, "1234", result.BlobGasUsed)
require.Equal(t, fmt.Sprintf("%d", denebPayload.BlockNumber), result.BlockNumber)
}
// TestExecutionPayloadDeneb_ToConsensus_HappyPath checks the
// (*ExecutionPayloadDeneb).ToConsensus function under normal conditions.
func TestExecutionPayloadDeneb_ToConsensus_HappyPath(t *testing.T) {
deneb := &ExecutionPayloadDeneb{
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
BlockNumber: "999",
GasLimit: "2222222",
GasUsed: "1111111",
Timestamp: "666666",
ExtraData: hexutil.Encode(fillByteSlice(6, 0x11)),
BaseFeePerGas: "1234",
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x33)),
Transactions: []string{
hexutil.Encode(fillByteSlice(5, 0x44)),
},
Withdrawals: []*Withdrawal{
{
WithdrawalIndex: "1",
ValidatorIndex: "2",
ExecutionAddress: hexutil.Encode(fillByteSlice(20, 0xaa)),
Amount: "100",
},
},
BlobGasUsed: "1234",
ExcessBlobGas: "5678",
}
result, err := deneb.ToConsensus()
require.NoError(t, err)
require.DeepEqual(t, hexutil.Encode(result.ParentHash), deneb.ParentHash)
require.DeepEqual(t, hexutil.Encode(result.FeeRecipient), deneb.FeeRecipient)
require.Equal(t, result.BlockNumber, uint64(999))
}
func TestExecutionPayloadHeaderCapellaFromConsensus_HappyPath(t *testing.T) {
capellaHeader := &enginev1.ExecutionPayloadHeaderCapella{
ParentHash: fillByteSlice(common.HashLength, 0xaa),
FeeRecipient: fillByteSlice(20, 0xbb),
StateRoot: fillByteSlice(32, 0xcc),
ReceiptsRoot: fillByteSlice(32, 0xdd),
LogsBloom: fillByteSlice(256, 0xee),
PrevRandao: fillByteSlice(32, 0xff),
BlockNumber: 555,
GasLimit: 1111111,
GasUsed: 222222,
Timestamp: 3333333333,
ExtraData: fillByteSlice(4, 0x12),
BaseFeePerGas: fillByteSlice(32, 0x34),
BlockHash: fillByteSlice(common.HashLength, 0x56),
TransactionsRoot: fillByteSlice(32, 0x78),
WithdrawalsRoot: fillByteSlice(32, 0x99),
}
result, err := ExecutionPayloadHeaderCapellaFromConsensus(capellaHeader)
require.NoError(t, err)
require.Equal(t, hexutil.Encode(capellaHeader.ParentHash), result.ParentHash)
require.DeepEqual(t, hexutil.Encode(capellaHeader.WithdrawalsRoot), result.WithdrawalsRoot)
}
func TestExecutionPayloadHeaderCapella_ToConsensus_HappyPath(t *testing.T) {
header := &ExecutionPayloadHeaderCapella{
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
BlockNumber: "555",
GasLimit: "1111111",
GasUsed: "222222",
Timestamp: "3333333333",
ExtraData: "0x1234abcd",
BaseFeePerGas: "1234",
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x56)),
TransactionsRoot: hexutil.Encode(fillByteSlice(32, 0x78)),
WithdrawalsRoot: hexutil.Encode(fillByteSlice(32, 0x99)),
}
result, err := header.ToConsensus()
require.NoError(t, err)
require.DeepEqual(t, hexutil.Encode(result.ParentHash), header.ParentHash)
require.DeepEqual(t, hexutil.Encode(result.FeeRecipient), header.FeeRecipient)
require.DeepEqual(t, hexutil.Encode(result.StateRoot), header.StateRoot)
require.DeepEqual(t, hexutil.Encode(result.ReceiptsRoot), header.ReceiptsRoot)
require.DeepEqual(t, hexutil.Encode(result.WithdrawalsRoot), header.WithdrawalsRoot)
}
func TestExecutionPayloadHeaderDenebFromConsensus_HappyPath(t *testing.T) {
denebHeader := &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: fillByteSlice(common.HashLength, 0xaa),
FeeRecipient: fillByteSlice(20, 0xbb),
StateRoot: fillByteSlice(32, 0xcc),
ReceiptsRoot: fillByteSlice(32, 0xdd),
LogsBloom: fillByteSlice(256, 0xee),
PrevRandao: fillByteSlice(32, 0xff),
BlockNumber: 999,
GasLimit: 5000000,
GasUsed: 2500000,
Timestamp: 4444444444,
ExtraData: fillByteSlice(4, 0x12),
BaseFeePerGas: fillByteSlice(32, 0x34),
BlockHash: fillByteSlice(common.HashLength, 0x56),
TransactionsRoot: fillByteSlice(32, 0x78),
WithdrawalsRoot: fillByteSlice(32, 0x99),
BlobGasUsed: 1234,
ExcessBlobGas: 5678,
}
result, err := ExecutionPayloadHeaderDenebFromConsensus(denebHeader)
require.NoError(t, err)
require.Equal(t, hexutil.Encode(denebHeader.ParentHash), result.ParentHash)
require.DeepEqual(t, hexutil.Encode(denebHeader.FeeRecipient), result.FeeRecipient)
require.DeepEqual(t, hexutil.Encode(denebHeader.StateRoot), result.StateRoot)
require.DeepEqual(t, fmt.Sprintf("%d", denebHeader.BlobGasUsed), result.BlobGasUsed)
}
func TestExecutionPayloadHeaderDeneb_ToConsensus_HappyPath(t *testing.T) {
header := &ExecutionPayloadHeaderDeneb{
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
BlockNumber: "999",
GasLimit: "5000000",
GasUsed: "2500000",
Timestamp: "4444444444",
ExtraData: "0x1234abcd",
BaseFeePerGas: "1234",
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x56)),
TransactionsRoot: hexutil.Encode(fillByteSlice(32, 0x78)),
WithdrawalsRoot: hexutil.Encode(fillByteSlice(32, 0x99)),
BlobGasUsed: "1234",
ExcessBlobGas: "5678",
}
result, err := header.ToConsensus()
require.NoError(t, err)
require.DeepEqual(t, hexutil.Encode(result.ParentHash), header.ParentHash)
require.DeepEqual(t, result.BlobGasUsed, uint64(1234))
require.DeepEqual(t, result.ExcessBlobGas, uint64(5678))
require.DeepEqual(t, result.BlockNumber, uint64(999))
}
func TestWithdrawalRequestsFromConsensus_HappyPath(t *testing.T) {
consensusRequests := []*enginev1.WithdrawalRequest{
{
SourceAddress: fillByteSlice(20, 0xbb),
ValidatorPubkey: fillByteSlice(48, 0xbb),
Amount: 12345,
},
{
SourceAddress: fillByteSlice(20, 0xcc),
ValidatorPubkey: fillByteSlice(48, 0xcc),
Amount: 54321,
},
}
result := WithdrawalRequestsFromConsensus(consensusRequests)
require.DeepEqual(t, len(result), len(consensusRequests))
require.DeepEqual(t, result[0].Amount, fmt.Sprintf("%d", consensusRequests[0].Amount))
}
func TestWithdrawalRequestFromConsensus_HappyPath(t *testing.T) {
req := &enginev1.WithdrawalRequest{
SourceAddress: fillByteSlice(20, 0xbb),
ValidatorPubkey: fillByteSlice(48, 0xbb),
Amount: 42,
}
result := WithdrawalRequestFromConsensus(req)
require.NotNil(t, result)
require.DeepEqual(t, result.SourceAddress, hexutil.Encode(fillByteSlice(20, 0xbb)))
}
func TestWithdrawalRequest_ToConsensus_HappyPath(t *testing.T) {
withdrawalReq := &WithdrawalRequest{
SourceAddress: hexutil.Encode(fillByteSlice(20, 111)),
ValidatorPubkey: hexutil.Encode(fillByteSlice(48, 123)),
Amount: "12345",
}
result, err := withdrawalReq.ToConsensus()
require.NoError(t, err)
require.DeepEqual(t, result.Amount, uint64(12345))
}
func TestConsolidationRequestsFromConsensus_HappyPath(t *testing.T) {
consensusRequests := []*enginev1.ConsolidationRequest{
{
SourceAddress: fillByteSlice(20, 111),
SourcePubkey: fillByteSlice(48, 112),
TargetPubkey: fillByteSlice(48, 113),
},
}
result := ConsolidationRequestsFromConsensus(consensusRequests)
require.DeepEqual(t, len(result), len(consensusRequests))
require.DeepEqual(t, result[0].SourceAddress, "0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f")
}
func TestDepositRequestsFromConsensus_HappyPath(t *testing.T) {
ds := []*enginev1.DepositRequest{
{
Pubkey: fillByteSlice(48, 0xbb),
WithdrawalCredentials: fillByteSlice(32, 0xdd),
Amount: 98765,
Signature: fillByteSlice(96, 0xff),
Index: 111,
},
}
result := DepositRequestsFromConsensus(ds)
require.DeepEqual(t, len(result), len(ds))
require.DeepEqual(t, result[0].Amount, "98765")
}
func TestDepositRequest_ToConsensus_HappyPath(t *testing.T) {
req := &DepositRequest{
Pubkey: hexutil.Encode(fillByteSlice(48, 0xbb)),
WithdrawalCredentials: hexutil.Encode(fillByteSlice(32, 0xaa)),
Amount: "123",
Signature: hexutil.Encode(fillByteSlice(96, 0xdd)),
Index: "456",
}
result, err := req.ToConsensus()
require.NoError(t, err)
require.DeepEqual(t, result.Amount, uint64(123))
require.DeepEqual(t, result.Signature, fillByteSlice(96, 0xdd))
}
func TestExecutionRequestsFromConsensus_HappyPath(t *testing.T) {
er := &enginev1.ExecutionRequests{
Deposits: []*enginev1.DepositRequest{
{
Pubkey: fillByteSlice(48, 0xba),
WithdrawalCredentials: fillByteSlice(32, 0xaa),
Amount: 33,
Signature: fillByteSlice(96, 0xff),
Index: 44,
},
},
Withdrawals: []*enginev1.WithdrawalRequest{
{
SourceAddress: fillByteSlice(20, 0xaa),
ValidatorPubkey: fillByteSlice(48, 0xba),
Amount: 555,
},
},
Consolidations: []*enginev1.ConsolidationRequest{
{
SourceAddress: fillByteSlice(20, 0xdd),
SourcePubkey: fillByteSlice(48, 0xdd),
TargetPubkey: fillByteSlice(48, 0xcc),
},
},
}
result := ExecutionRequestsFromConsensus(er)
require.NotNil(t, result)
require.Equal(t, 1, len(result.Deposits))
require.Equal(t, "33", result.Deposits[0].Amount)
require.Equal(t, 1, len(result.Withdrawals))
require.Equal(t, "555", result.Withdrawals[0].Amount)
require.Equal(t, 1, len(result.Consolidations))
require.Equal(t, "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", result.Consolidations[0].TargetPubkey)
}
func TestExecutionRequests_ToConsensus_HappyPath(t *testing.T) {
execReq := &ExecutionRequests{
Deposits: []*DepositRequest{
{
Pubkey: hexutil.Encode(fillByteSlice(48, 0xbb)),
WithdrawalCredentials: hexutil.Encode(fillByteSlice(32, 0xaa)),
Amount: "33",
Signature: hexutil.Encode(fillByteSlice(96, 0xff)),
Index: "44",
},
},
Withdrawals: []*WithdrawalRequest{
{
SourceAddress: hexutil.Encode(fillByteSlice(20, 0xdd)),
ValidatorPubkey: hexutil.Encode(fillByteSlice(48, 0xbb)),
Amount: "555",
},
},
Consolidations: []*ConsolidationRequest{
{
SourceAddress: hexutil.Encode(fillByteSlice(20, 0xcc)),
SourcePubkey: hexutil.Encode(fillByteSlice(48, 0xbb)),
TargetPubkey: hexutil.Encode(fillByteSlice(48, 0xcc)),
},
},
}
result, err := execReq.ToConsensus()
require.NoError(t, err)
require.Equal(t, 1, len(result.Deposits))
require.Equal(t, uint64(33), result.Deposits[0].Amount)
require.Equal(t, 1, len(result.Withdrawals))
require.Equal(t, uint64(555), result.Withdrawals[0].Amount)
require.Equal(t, 1, len(result.Consolidations))
require.DeepEqual(t, fillByteSlice(48, 0xcc), result.Consolidations[0].TargetPubkey)
}


@@ -54,4 +54,5 @@ type ForkChoiceNodeExtraData struct {
Balance string `json:"balance"`
ExecutionOptimistic bool `json:"execution_optimistic"`
TimeStamp string `json:"timestamp"`
Target string `json:"target"`
}
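// A minimal sketch (hypothetical values) of how the tail of this struct,
// including the new "target" field added in this diff, serializes to JSON:
//
//	{
//	  "balance": "32000000000",
//	  "execution_optimistic": false,
//	  "timestamp": "1677994200",
//	  "target": "0x00000000000000000000000000000000000000000000000000000000deadbeef"
//	}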


@@ -244,26 +244,6 @@ type Withdrawal struct {
Amount string `json:"amount"`
}
type DepositRequest struct {
Pubkey string `json:"pubkey"`
WithdrawalCredentials string `json:"withdrawal_credentials"`
Amount string `json:"amount"`
Signature string `json:"signature"`
Index string `json:"index"`
}
type WithdrawalRequest struct {
SourceAddress string `json:"source_address"`
ValidatorPubkey string `json:"validator_pubkey"`
Amount string `json:"amount"`
}
type ConsolidationRequest struct {
SourceAddress string `json:"source_address"`
SourcePubkey string `json:"source_pubkey"`
TargetPubkey string `json:"target_pubkey"`
}
type PendingDeposit struct {
Pubkey string `json:"pubkey"`
WithdrawalCredentials string `json:"withdrawal_credentials"`


@@ -125,7 +125,7 @@ func getChan(key string) chan byte {
// Return a new slice with unique elements.
func unique(arr []string) []string {
if arr == nil || len(arr) <= 1 {
if len(arr) <= 1 {
return arr
}


@@ -12,6 +12,7 @@ go_library(
"forkchoice_update_execution.go",
"head.go",
"head_sync_committee_info.go",
"holeskyhack.go",
"init_sync_process_block.go",
"log.go",
"merge_ascii_art.go",
@@ -25,8 +26,8 @@ go_library(
"receive_attestation.go",
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
"service.go",
"setup_forchoice.go",
"tracked_proposer.go",
"weak_subjectivity_checks.go",
],
@@ -50,7 +51,6 @@ go_library(
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -144,7 +144,6 @@ go_test(
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/das:go_default_library",
@@ -161,12 +160,10 @@ go_test(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",


@@ -33,7 +33,6 @@ var (
)
var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")
// An invalid block is a block that fails the state transition based on the core protocol rules.
// The beacon node shall neither accept nor build blocks that branch off from an invalid block.


@@ -444,9 +444,6 @@ func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32
// Blobs may not exist for some blocks, leading to deletion failures. Log such errors at debug level.
log.WithError(err).Debug("Could not remove blob from blob storage")
}
if err := s.dataColumnStorage.Remove(root); err != nil {
log.WithError(err).Debug("Could not remove data columns from data column storage")
}
}
return nil
}


@@ -0,0 +1,21 @@
package blockchain
import (
"encoding/hex"
"github.com/pkg/errors"
)
var errHoleskyForbiddenRoot = errors.New("refusing to process forbidden holesky block")
// Hack to prevent importing a known-bad Holesky block.
var badHoleskyRoot [32]byte
func init() {
hexStr := "2db899881ed8546476d0b92c6aa9110bea9a4cd0dbeb5519eb0ea69575f1f359"
bytes, err := hex.DecodeString(hexStr)
if err != nil {
panic(err)
}
badHoleskyRoot = [32]byte(bytes)
}
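// A minimal sketch (hypothetical helper; the real check is the
// b.Root() == badHoleskyRoot comparison in onBlockBatch, shown later in this diff):
func isForbiddenHoleskyBlock(root [32]byte) error {
	if root == badHoleskyRoot {
		return errHoleskyForbiddenRoot
	}
	return nil
}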


@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"kzg.go",
"trusted_setup.go",
"validation.go",
],
@@ -13,9 +12,6 @@ go_library(
deps = [
"//consensus-types/blocks:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
@@ -30,8 +26,7 @@ go_test(
deps = [
"//consensus-types/blocks:go_default_library",
"//testing/require:go_default_library",
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
"//testing/util:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)


@@ -1,125 +0,0 @@
package kzg
import (
"github.com/pkg/errors"
ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
)
// BytesPerBlob is the number of bytes in a single blob.
const BytesPerBlob = ckzg4844.BytesPerBlob
// Blob represents a serialized chunk of data.
type Blob [BytesPerBlob]byte
// BytesPerCell is the number of bytes in a single cell.
const BytesPerCell = ckzg4844.BytesPerCell
// Cell represents a chunk of an encoded Blob.
type Cell [BytesPerCell]byte
// Commitment represents a KZG commitment to a Blob.
type Commitment [48]byte
// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
type Proof [48]byte
// Bytes48 is a 48-byte array.
type Bytes48 = ckzg4844.Bytes48
// Bytes32 is a 32-byte array.
type Bytes32 = ckzg4844.Bytes32
// CellsAndProofs represents the Cells and Proofs corresponding to
// a single blob.
type CellsAndProofs struct {
Cells []Cell
Proofs []Proof
}
func BlobToKZGCommitment(blob *Blob) (Commitment, error) {
kzgBlob := kzg4844.Blob(*blob)
comm, err := kzg4844.BlobToCommitment(&kzgBlob)
if err != nil {
return Commitment{}, err
}
return Commitment(comm), nil
}
func ComputeCells(blob *Blob) ([]Cell, error) {
ckzgBlob := (*ckzg4844.Blob)(blob)
ckzgCells, err := ckzg4844.ComputeCells(ckzgBlob)
if err != nil {
return nil, errors.Wrap(err, "compute cells")
}
cells := make([]Cell, len(ckzgCells))
for i := range ckzgCells {
cells[i] = Cell(ckzgCells[i])
}
return cells, nil
}
func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) {
kzgBlob := kzg4844.Blob(*blob)
proof, err := kzg4844.ComputeBlobProof(&kzgBlob, kzg4844.Commitment(commitment))
if err != nil {
return [48]byte{}, err
}
return Proof(proof), nil
}
func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) {
ckzgBlob := (*ckzg4844.Blob)(blob)
ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(ckzgBlob)
if err != nil {
return CellsAndProofs{}, err
}
return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}
func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
// Convert `Cell` type to `ckzg4844.Cell`
ckzgCells := make([]ckzg4844.Cell, len(cells))
for i := range cells {
ckzgCells[i] = ckzg4844.Cell(cells[i])
}
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}
func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
// Convert `Cell` type to `ckzg4844.Cell`
ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
for i := range partialCells {
ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i])
}
ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
if err != nil {
return CellsAndProofs{}, errors.Wrap(err, "recover cells and KZG proofs")
}
return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}
// Convert cells/proofs to the CellsAndProofs type defined in this package.
func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) {
if len(ckzgCells) != len(ckzgProofs) {
return CellsAndProofs{}, errors.New("different number of cells/proofs")
}
var cells []Cell
var proofs []Proof
for i := range ckzgCells {
cells = append(cells, Cell(ckzgCells[i]))
proofs = append(proofs, Proof(ckzgProofs[i]))
}
return CellsAndProofs{
Cells: cells,
Proofs: proofs,
}, nil
}


@@ -5,8 +5,6 @@ import (
"encoding/json"
GoKZG "github.com/crate-crypto/go-kzg-4844"
CKZG "github.com/ethereum/c-kzg-4844/v2/bindings/go"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
)
@@ -14,53 +12,17 @@ var (
//go:embed trusted_setup.json
embeddedTrustedSetup []byte // 1.2Mb
kzgContext *GoKZG.Context
kzgLoaded bool
)
type TrustedSetup struct {
G1Monomial [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_monomial"`
G1Lagrange [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_lagrange"`
G2Monomial [65]GoKZG.G2CompressedHexStr `json:"g2_monomial"`
}
func Start() error {
trustedSetup := &TrustedSetup{}
err := json.Unmarshal(embeddedTrustedSetup, trustedSetup)
parsedSetup := GoKZG.JSONTrustedSetup{}
err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
if err != nil {
return errors.Wrap(err, "could not parse trusted setup JSON")
}
kzgContext, err = GoKZG.NewContext4096(&GoKZG.JSONTrustedSetup{
SetupG2: trustedSetup.G2Monomial[:],
SetupG1Lagrange: trustedSetup.G1Lagrange})
kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
if err != nil {
return errors.Wrap(err, "could not initialize go-kzg context")
}
// Length of a G1 point, converted from hex to binary.
g1MonomialBytes := make([]byte, len(trustedSetup.G1Monomial)*(len(trustedSetup.G1Monomial[0])-2)/2)
for i, g1 := range &trustedSetup.G1Monomial {
copy(g1MonomialBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
}
// Length of a G1 point, converted from hex to binary.
g1LagrangeBytes := make([]byte, len(trustedSetup.G1Lagrange)*(len(trustedSetup.G1Lagrange[0])-2)/2)
for i, g1 := range &trustedSetup.G1Lagrange {
copy(g1LagrangeBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
}
// Length of a G2 point, converted from hex to binary.
g2MonomialBytes := make([]byte, len(trustedSetup.G2Monomial)*(len(trustedSetup.G2Monomial[0])-2)/2)
for i, g2 := range &trustedSetup.G2Monomial {
copy(g2MonomialBytes[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
}
if !kzgLoaded {
// TODO: Provide a configuration option for this.
var precompute uint = 8
// Free the current trusted setup before running this method. CKZG
// panics if the same setup is loaded multiple times.
if err = CKZG.LoadTrustedSetup(g1MonomialBytes, g1LagrangeBytes, g2MonomialBytes, precompute); err != nil {
panic(err)
}
}
kzgLoaded = true
return nil
}
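// A minimal usage sketch (hypothetical helper inside this package): Start is
// assumed to run once during node startup, before any commitment or proof
// computation that relies on the trusted setup.
func exampleStart() {
	if err := Start(); err != nil {
		panic(err) // hypothetical handling; a real caller would log and abort
	}
}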

File diff suppressed because it is too large


@@ -1,16 +1,12 @@
package kzg
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"testing"
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
GoKZG "github.com/crate-crypto/go-kzg-4844"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/sirupsen/logrus"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZGProof, error) {
@@ -41,7 +37,7 @@ func TestBytesToAny(t *testing.T) {
}
func TestGenerateCommitmentAndProof(t *testing.T) {
blob := getRandBlob(123)
blob := util.GetRandBlob(123)
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
expectedCommitment := GoKZG.KZGCommitment{180, 218, 156, 194, 59, 20, 10, 189, 186, 254, 132, 93, 7, 127, 104, 172, 238, 240, 237, 70, 83, 89, 1, 152, 99, 0, 165, 65, 143, 62, 20, 215, 230, 14, 205, 95, 28, 245, 54, 25, 160, 16, 178, 31, 232, 207, 38, 85}
@@ -49,36 +45,3 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
require.Equal(t, expectedCommitment, commitment)
require.Equal(t, expectedProof, proof)
}
func deterministicRandomness(seed int64) [32]byte {
// Converts an int64 to a byte slice
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.BigEndian, seed)
if err != nil {
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
return [32]byte{}
}
bytes := buf.Bytes()
return sha256.Sum256(bytes)
}
// Returns a serialized random field element in big-endian
func getRandFieldElement(seed int64) [32]byte {
bytes := deterministicRandomness(seed)
var r fr.Element
r.SetBytes(bytes[:])
return GoKZG.SerializeScalar(r)
}
// Returns a random blob using the passed seed as entropy
func getRandBlob(seed int64) GoKZG.Blob {
var blob GoKZG.Blob
bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
fieldElementBytes := getRandFieldElement(seed + int64(i))
copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
}
return blob
}


@@ -1,12 +1,9 @@
package blockchain
import (
"time"
"github.com/prysmaticlabs/prysm/v5/async/event"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
@@ -129,9 +126,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
}
// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Acceser) Option {
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
return func(s *Service) error {
s.cfg.P2P = p
s.cfg.P2p = p
return nil
}
}
@@ -210,14 +207,6 @@ func WithBlobStorage(b *filesystem.BlobStorage) Option {
}
}
// WithDataColumnStorage sets the data column storage backend for the blockchain service.
func WithDataColumnStorage(b *filesystem.DataColumnStorage) Option {
return func(s *Service) error {
s.dataColumnStorage = b
return nil
}
}
func WithSyncChecker(checker Checker) Option {
return func(s *Service) error {
s.cfg.SyncChecker = checker
@@ -225,16 +214,9 @@ func WithSyncChecker(checker Checker) Option {
}
}
func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
func WithSlasherEnabled(enabled bool) Option {
return func(s *Service) error {
s.cfg.CustodyInfo = custodyInfo
return nil
}
}
func WithGenesisTime(genesisTime time.Time) Option {
return func(s *Service) error {
s.genesisTime = genesisTime
s.slasherEnabled = enabled
return nil
}
}


@@ -3,13 +3,11 @@ package blockchain
import (
"context"
"fmt"
"slices"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
@@ -17,7 +15,6 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -176,6 +173,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
var set *bls.SignatureBatch
boundaries := make(map[[32]byte]state.BeaconState)
for i, b := range blks {
if b.Root() == badHoleskyRoot {
return errHoleskyForbiddenRoot
}
v, h, err := getStateVersionAndPayload(preState)
if err != nil {
return err
@@ -237,9 +237,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return err
}
}
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
}
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
JustifiedCheckpoint: jCheckpoints[i],
@@ -526,206 +525,20 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
return missing, nil
}
func missingDataColumns(bs *filesystem.DataColumnStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
if len(expected) == 0 {
return nil, nil
}
if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
return nil, errMaxDataColumnsExceeded
}
// Get a summary of the data columns stored in the database.
summary := bs.Summary(root)
// Check all expected data columns against the summary.
missing := make(map[uint64]bool)
for column := range expected {
if !summary.HasIndex(column) {
missing[column] = true
}
}
return missing, nil
}
// isDataAvailable blocks until all sidecars committed to in the block are available,
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the sidecar notifier channel for the given root until the channel is
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
block := signedBlock.Block()
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
if signed.Version() < version.Deneb {
return nil
}
block := signed.Block()
if block == nil {
return errors.New("invalid nil beacon block")
}
blockVersion := block.Version()
if blockVersion >= version.Fulu {
return s.areDataColumnsAvailable(ctx, root, block)
}
if blockVersion >= version.Deneb {
return s.areBlobsAvailable(ctx, root, block)
}
return nil
}
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return nil
}
body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "blob KZG commitments")
}
// If the block has no commitments, there is nothing to wait for.
if len(kzgCommitments) == 0 {
return nil
}
// All columns to sample need to be available for the block to be considered available.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
nodeID := s.cfg.P2P.NodeID()
// Prevent the custody group count from changing during the rest of the function.
s.cfg.CustodyInfo.Mut.RLock()
defer s.cfg.CustodyInfo.Mut.RUnlock()
// Get the custody group sampling size for the node.
custodyGroupSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
if err != nil {
return errors.Wrap(err, "peer info")
}
// Subscribe to data columns newly stored in the database.
identsChan := make(chan filesystem.DataColumnsIdent)
subscription := s.dataColumnStorage.DataColumnFeed.Subscribe(identsChan)
defer subscription.Unsubscribe()
// Get the count of data columns we already have in the store.
summary := s.dataColumnStorage.Summary(root)
storedDataColumnsCount := summary.Count()
// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
// We don't need to wait for the rest of the data columns to declare the block as available.
if peerdas.CanSelfReconstruct(storedDataColumnsCount) {
return nil
}
// Get a map of data column indices that are not currently available.
missingMap, err := missingDataColumns(s.dataColumnStorage, root, peerInfo.CustodyColumns)
if err != nil {
return errors.Wrap(err, "missing data columns")
}
// If there are no missing indices, all data column sidecars are available.
// This is the happy path.
if len(missingMap) == 0 {
return nil
}
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
timer := time.AfterFunc(time.Until(nextSlot), func() {
missingMapCount := uint64(len(missingMap))
if missingMapCount == 0 {
return
}
var (
expected interface{} = "all"
missing interface{} = "all"
)
numberOfColumns := params.BeaconConfig().NumberOfColumns
colMapCount := uint64(len(peerInfo.CustodyColumns))
if colMapCount < numberOfColumns {
expected = uint64MapToSortedSlice(peerInfo.CustodyColumns)
}
if missingMapCount < numberOfColumns {
missing = uint64MapToSortedSlice(missingMap)
}
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"root": fmt.Sprintf("%#x", root),
"columnsExpected": expected,
"columnsWaiting": missing,
}).Warning("Data columns still missing at slot end")
})
defer timer.Stop()
}
for {
select {
case idents := <-identsChan:
if idents.Root != root {
// This is not the root we are looking for.
continue
}
for _, index := range idents.Indices {
// This is a data column we are expecting.
if _, ok := missingMap[index]; ok {
storedDataColumnsCount++
}
// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
// We don't need to wait for the rest of the data columns to declare the block as available.
if peerdas.CanSelfReconstruct(storedDataColumnsCount) {
return nil
}
// Remove the index from the missing map.
delete(missingMap, index)
// Return if there are no more missing data columns.
if len(missingMap) == 0 {
return nil
}
}
case <-ctx.Done():
var missingIndices interface{} = "all"
numberOfColumns := params.BeaconConfig().NumberOfColumns
missingIndicesCount := uint64(len(missingMap))
if missingIndicesCount < numberOfColumns {
missingIndices = uint64MapToSortedSlice(missingMap)
}
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
}
}
}
// areBlobsAvailable blocks until all BlobSidecars committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
blockSlot := block.Slot()
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
return nil
@@ -747,7 +560,7 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
// Get a map of BlobSidecar indices that are not currently available.
missing, err := missingIndices(s.blobStorage, root, kzgCommitments, block.Slot())
if err != nil {
return errors.Wrap(err, "missing indices")
return err
}
// If there are no missing indices, all BlobSidecars are available.
if len(missing) == 0 {
@@ -759,20 +572,15 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
nc := s.blobNotifiers.forRoot(root, block.Slot())
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
nst := time.AfterFunc(time.Until(nextSlot), func() {
if len(missing) == 0 {
return
}
log.WithFields(logrus.Fields{
"slot": blockSlot,
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": len(missing),
}).Error("Still waiting for blobs DA check at slot end.")
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
Error("Still waiting for DA check at slot end.")
})
defer nst.Stop()
}
@@ -794,14 +602,13 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
}
}
// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
output := make([]uint64, 0, len(input))
for idx := range input {
output = append(output, idx)
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": missing,
}
slices.Sort[[]uint64](output)
return output
}
// lateBlockTasks is called 4 seconds into the slot and performs tasks
@@ -887,7 +694,7 @@ func (s *Service) waitForSync() error {
}
}
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte) error {
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
}


@@ -2,7 +2,6 @@ package blockchain
import (
"context"
"crypto/rand"
"fmt"
"math/big"
"strconv"
@@ -16,7 +15,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
lightClient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
@@ -28,7 +26,6 @@ import (
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -2042,7 +2039,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
require.Equal(t, true, optimistic)
// Check that the node's justified checkpoint does not agree with the
// last valid state's justified checkpoint
@@ -3151,151 +3148,3 @@ func TestSaveLightClientBootstrap(t *testing.T) {
reset()
}
type testIsAvailableParams struct {
options []Option
blobKzgCommitmentsCount uint64
columnsToSave []uint64
}
func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
ctx, cancel := context.WithCancel(context.Background())
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
options := append(params.options, WithDataColumnStorage(dataColumnStorage))
service, _ := minimalTestService(t, options...)
genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32 /*validator count*/)
err := service.saveGenesisData(ctx, genesisState)
require.NoError(t, err)
conf := util.DefaultBlockGenConfig()
conf.NumBlobKzgCommitments = params.blobKzgCommitmentsCount
signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, 10 /*block slot*/)
require.NoError(t, err)
root, err := signedBeaconBlock.Block.HashTreeRoot()
require.NoError(t, err)
dataColumnsParams := make([]verification.DataColumnParams, 0, len(params.columnsToSave))
for _, i := range params.columnsToSave {
dataColumnParam := verification.DataColumnParams{ColumnIndex: i}
dataColumnsParams = append(dataColumnsParams, dataColumnParam)
}
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: dataColumnsParams}
_, verifiedRODataColumns := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
err = dataColumnStorage.Save(verifiedRODataColumns)
require.NoError(t, err)
signed, err := consensusblocks.NewSignedBeaconBlock(signedBeaconBlock)
require.NoError(t, err)
return ctx, cancel, service, root, signed
}
func TestIsDataAvailable(t *testing.T) {
t.Run("Fulu - out of retention window", func(t *testing.T) {
params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
err := service.isDataAvailable(ctx, root, signed)
require.NoError(t, err)
})
t.Run("Fulu - no commitment in blocks", func(t *testing.T) {
ctx, _, service, root, signed := testIsAvailableSetup(t, testIsAvailableParams{})
err := service.isDataAvailable(ctx, root, signed)
require.NoError(t, err)
})
t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
halfNumberOfColumns := params.BeaconConfig().NumberOfColumns / 2
indices := make([]uint64, 0, halfNumberOfColumns)
for i := range halfNumberOfColumns {
indices = append(indices, i)
}
params := testIsAvailableParams{
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
columnsToSave: indices,
blobKzgCommitmentsCount: 3,
}
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
err := service.isDataAvailable(ctx, root, signed)
require.NoError(t, err)
})
t.Run("Fulu - no missing data columns", func(t *testing.T) {
params := testIsAvailableParams{
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
columnsToSave: []uint64{1, 17, 19, 42, 75, 87, 102, 117, 119}, // 119 is not needed
blobKzgCommitmentsCount: 3,
}
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
err := service.isDataAvailable(ctx, root, signed)
require.NoError(t, err)
})
t.Run("Fulu - some initially missing data columns (no reconstruction)", func(t *testing.T) {
var custodyInfo peerdas.CustodyInfo
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(128)
custodyInfo.ToAdvertiseGroupCount.Set(128)
testParams := testIsAvailableParams{
options: []Option{WithCustodyInfo(&custodyInfo)},
blobKzgCommitmentsCount: 3,
}
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
// If needed, generate a random root that is different from the block root.
var randomRoot [fieldparams.RootLength]byte
for randomRoot == root {
randomRootSlice := make([]byte, fieldparams.RootLength)
_, err := rand.Read(randomRootSlice)
require.NoError(t, err)
copy(randomRoot[:], randomRootSlice)
}
// TODO: Achieve the same result without using time.AfterFunc.
time.AfterFunc(10*time.Millisecond, func() {
halfNumberOfColumns := params.BeaconConfig().NumberOfColumns / 2
indices := make([]uint64, 0, halfNumberOfColumns)
for i := range halfNumberOfColumns {
indices = append(indices, i)
}
withSomeRequiredColumns := filesystem.DataColumnsIdent{Root: root, Indices: indices}
service.dataColumnStorage.DataColumnFeed.Send(withSomeRequiredColumns)
})
err := service.isDataAvailable(ctx, root, signed)
require.NoError(t, err)
})
t.Run("Fulu - some columns are definitively missing", func(t *testing.T) {
params := testIsAvailableParams{
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
blobKzgCommitmentsCount: 3,
}
ctx, cancel, service, root, signed := testIsAvailableSetup(t, params)
// TODO: Achieve the same result without using time.AfterFunc.
time.AfterFunc(10*time.Millisecond, func() {
cancel()
})
err := service.isDataAvailable(ctx, root, signed)
require.NotNil(t, err)
})
}

View File

@@ -16,7 +16,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -52,13 +51,6 @@ type BlobReceiver interface {
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}
// DataColumnReceiver interface defines the methods of chain service for receiving new
// data columns
type DataColumnReceiver interface {
ReceiveDataColumn(blocks.VerifiedRODataColumn) error
ReceiveDataColumns([]blocks.VerifiedRODataColumn) error
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
@@ -72,12 +64,16 @@ type SlashingReceiver interface {
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
if blockRoot == badHoleskyRoot {
return errHoleskyForbiddenRoot
}
// Return early if the block has been synced
if s.InForkchoice(blockRoot) {
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
return nil
}
receivedTime := time.Now()
s.blockBeingSynced.set(blockRoot)
defer s.blockBeingSynced.unset(blockRoot)
@@ -86,7 +82,6 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err != nil {
return err
}
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
if err != nil {
return errors.Wrap(err, "could not get block's prestate")
@@ -102,12 +97,10 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err != nil {
return err
}
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
if err != nil {
return err
}
// Defragment the state before continuing block processing.
s.defragmentState(postState)
@@ -132,7 +125,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return err
}
// If slasher is configured, forward the attestations in the block via an event feed for processing.
if features.Get().EnableSlasher {
if s.slasherEnabled {
go s.sendBlockAttestationsToSlasher(blockCopy, preState)
}
@@ -236,32 +229,24 @@ func (s *Service) handleDA(
block interfaces.SignedBeaconBlock,
blockRoot [32]byte,
avs das.AvailabilityStore,
) (elapsed time.Duration, err error) {
defer func(start time.Time) {
elapsed := time.Since(start)
if err == nil {
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
) (time.Duration, error) {
daStartTime := time.Now()
if avs != nil {
rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
if err != nil {
return 0, err
}
}(time.Now())
if avs == nil {
if err = s.isDataAvailable(ctx, blockRoot, block); err != nil {
return
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
return 0, errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
}
} else {
if err := s.isDataAvailable(ctx, blockRoot, block); err != nil {
return 0, errors.Wrap(err, "could not validate blob data availability")
}
return
}
var rob blocks.ROBlock
rob, err = blocks.NewROBlockWithRoot(block, blockRoot)
if err != nil {
return
}
err = avs.IsDataAvailable(ctx, s.CurrentSlot(), rob)
return
daWaitedTime := time.Since(daStartTime)
dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
return daWaitedTime, nil
}
func (s *Service) reportPostBlockProcessing(

View File

@@ -1,25 +0,0 @@
package blockchain
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)
// ReceiveDataColumns receives a batch of data columns.
func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
if err := s.dataColumnStorage.Save(dataColumnSidecars); err != nil {
return errors.Wrap(err, "save data column sidecars")
}
return nil
}
// ReceiveDataColumn receives a single data column.
// (It is only a wrapper around ReceiveDataColumns.)
func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
return errors.Wrap(err, "save data column sidecars")
}
return nil
}

View File

@@ -3,7 +3,6 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"runtime"
@@ -17,14 +16,12 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/slashings"
@@ -33,8 +30,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -44,6 +39,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
)
// Service represents a service that handles the internal
@@ -67,7 +63,7 @@ type Service struct {
blobNotifiers *blobNotifierMap
blockBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
slasherEnabled bool
}
// config options for the service.
@@ -83,7 +79,7 @@ type config struct {
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2P p2p.Acceser
P2p p2p.Broadcaster
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
@@ -95,7 +91,6 @@ type config struct {
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller execution.EngineCaller
SyncChecker Checker
CustodyInfo *peerdas.CustodyInfo
}
// Checker is an interface used to determine if a node is in initial sync
@@ -109,26 +104,22 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
sync.RWMutex
notifiers map[[32]byte]chan uint64
// TODO: Separate blobs from data columns
// seenIndex map[[32]byte][]bool
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
seenIndex map[[32]byte][]bool
}
// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
// TODO: Separate blobs from data columns
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
// if idx >= uint64(maxBlobsPerBlock) {
// return
// }
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if idx >= uint64(maxBlobsPerBlock) {
return
}
bn.Lock()
seen := bn.seenIndex[root]
// TODO: Separate blobs from data columns
// if seen == nil {
// seen = make([]bool, maxBlobsPerBlock)
// }
if seen == nil {
seen = make([]bool, maxBlobsPerBlock)
}
if seen[idx] {
bn.Unlock()
return
@@ -139,9 +130,7 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
// Retrieve or create the notifier channel for the given root.
c, ok := bn.notifiers[root]
if !ok {
// TODO: Separate blobs from data columns
// c = make(chan uint64, maxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
c = make(chan uint64, maxBlobsPerBlock)
bn.notifiers[root] = c
}
@@ -151,15 +140,12 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
}
func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
// TODO: Separate blobs from data columns
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
bn.Lock()
defer bn.Unlock()
c, ok := bn.notifiers[root]
if !ok {
// TODO: Separate blobs from data columns
// c = make(chan uint64, maxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
c = make(chan uint64, maxBlobsPerBlock)
bn.notifiers[root] = c
}
return c
@@ -185,9 +171,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
bn := &blobNotifierMap{
notifiers: make(map[[32]byte]chan uint64),
// TODO: Separate blobs from data columns
// seenIndex: make(map[[32]byte][]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
seenIndex: make(map[[32]byte][]bool),
}
srv := &Service{
ctx: ctx,
@@ -284,69 +268,18 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return err
}
s.originBlockRoot = originRoot
if err := s.initializeHeadFromDB(s.ctx); err != nil {
return errors.Wrap(err, "could not set up chain info")
st, err := s.cfg.StateGen.Resume(s.ctx, s.cfg.FinalizedStateAtStartUp)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
spawnCountdownIfPreGenesis(s.ctx, s.genesisTime, s.cfg.BeaconDB)
justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
if justified == nil {
return errNilJustifiedCheckpoint
}
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
if finalized == nil {
return errNilFinalizedCheckpoint
}
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
st, err := s.cfg.StateGen.StateByRoot(s.ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint state")
}
finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint block")
}
roblock, err := blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
return errors.Wrap(err, "could not insert finalized block to forkchoice")
}
if !features.Get().EnableStartOptimistic {
lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get last validated checkpoint")
}
if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(s.ctx, fRoot); err != nil {
return errors.Wrap(err, "could not set finalized block as validated")
}
}
if err := s.setupForkchoice(st); err != nil {
return errors.Wrap(err, "could not set up forkchoice")
}
// not attempting to save initial sync blocks here, because there shouldn't be any until
// after the statefeed.Initialized event is fired (below)
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, finalized.Epoch); err != nil {
cp := s.FinalizedCheckpt()
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, cp.Epoch); err != nil {
// Exit run time if the node failed to verify weak subjectivity checkpoint.
return errors.Wrap(err, "could not verify initial checkpoint provided for chain sync")
}
@@ -355,7 +288,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
return errors.Wrap(err, "failed to initialize blockchain service")
}
return nil
}
@@ -385,46 +317,40 @@ func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error
return genesisBlkRoot, nil
}
// initializeHeadFromDB uses the finalized checkpoint and head block found in the database to set the current head.
// initializeHead uses the finalized checkpoint and head block root from forkchoice to set the current head.
// Note that this may block until stategen replays blocks between the finalized and head blocks
// if the head sync flag was specified and the gap between the finalized and head blocks is at least 128 epochs long.
func (s *Service) initializeHeadFromDB(ctx context.Context) error {
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint from db")
}
if finalized == nil {
// This should never happen. At chain start, the finalized checkpoint
// would be the genesis state and block.
return errors.New("no finalized epoch in the database")
}
finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
var finalizedState state.BeaconState
finalizedState, err = s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
if finalizedState == nil || finalizedState.IsNil() {
func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) error {
cp := s.FinalizedCheckpt()
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
if st == nil || st.IsNil() {
return errors.New("finalized state can't be nil")
}
finalizedBlock, err := s.getBlock(ctx, finalizedRoot)
s.cfg.ForkChoiceStore.RLock()
root := s.cfg.ForkChoiceStore.HighestReceivedBlockRoot()
s.cfg.ForkChoiceStore.RUnlock()
blk, err := s.cfg.BeaconDB.Block(ctx, root)
if err != nil {
return errors.Wrap(err, "could not get finalized block")
return errors.Wrap(err, "could not get head block")
}
if err := s.setHead(&head{
finalizedRoot,
finalizedBlock,
finalizedState,
finalizedBlock.Block().Slot(),
if root != fRoot {
st, err = s.cfg.StateGen.StateByRoot(ctx, root)
if err != nil {
return errors.Wrap(err, "could not get head state")
}
}
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"slot": blk.Block().Slot(),
}).Info("Initialized head block from DB")
return errors.Wrap(s.setHead(&head{
root,
blk,
st,
blk.Block().Slot(),
false,
}); err != nil {
return errors.Wrap(err, "could not set head")
}
return nil
}), "could not set head")
}
func (s *Service) startFromExecutionChain() error {

View File

@@ -97,14 +97,13 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithAttestationPool(attestations.NewPool()),
WithSlashingPool(slashings.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithP2PBroadcaster(&mockAccesser{}),
WithP2PBroadcaster(&mockBroadcaster{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fc),
WithAttestationService(attService),
WithStateGen(stateGen),
WithPayloadIDCache(cache.NewPayloadIDCache()),
WithClockSynchronizer(startup.NewClockSynchronizer()),
WithP2PBroadcaster(&mockAccesser{}),
}
chainService, err := NewService(ctx, opts...)
@@ -588,9 +587,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
// Initialize a blobNotifierMap
bn := &blobNotifierMap{
// TODO: Separate blobs from data columns
// seenIndex: make(map[[32]byte][]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
seenIndex: make(map[[32]byte][]bool),
notifiers: make(map[[32]byte]chan uint64),
}

View File

@@ -0,0 +1,187 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"github.com/pkg/errors"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func (s *Service) setupForkchoice(st state.BeaconState) error {
if err := s.setupForkchoiceCheckpoints(); err != nil {
return errors.Wrap(err, "could not set up forkchoice checkpoints")
}
if err := s.setupForkchoiceTree(st); err != nil {
return errors.Wrap(err, "could not set up forkchoice root")
}
if err := s.initializeHead(s.ctx, st); err != nil {
return errors.Wrap(err, "could not initialize head from db")
}
return nil
}
func (s *Service) startupHeadRoot() [32]byte {
headStr := features.Get().ForceHead
cp := s.FinalizedCheckpt()
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
if headStr == "" {
return fRoot
}
if headStr == "head" {
root, err := s.cfg.BeaconDB.HeadBlockRoot()
if err != nil {
log.WithError(err).Error("could not get head block root, starting with finalized block as head")
return fRoot
}
log.Infof("Using Head root of %#x", root)
return root
}
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
if err != nil {
log.WithError(err).Error("could not parse head root, starting with finalized block as head")
return fRoot
}
return [32]byte(root)
}
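In other words, the ForceHead feature value accepts three shapes: empty (fall back to the finalized root), the literal string "head" (use the head block root stored in the database), or a hex-encoded 32-byte root; a value that fails to parse also falls back to the finalized root, with an error log.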
func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
headRoot := s.startupHeadRoot()
cp := s.FinalizedCheckpt()
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
if err := s.setupForkchoiceRoot(st); err != nil {
return errors.Wrap(err, "could not set up forkchoice root")
}
fBlk, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block")
}
if err := s.setHead(&head{
fRoot,
fBlk,
st,
fBlk.Block().Slot(),
false,
}); err != nil {
return errors.Wrap(err, "could not set head")
}
if headRoot == fRoot {
return nil
}
blk, err := s.cfg.BeaconDB.Block(s.ctx, headRoot)
if err != nil {
log.WithError(err).Error("could not get head block, starting with finalized block as head")
return nil
}
if slots.ToEpoch(blk.Block().Slot()) < cp.Epoch {
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("head block is older than finalized block, starting with finalized block as head")
return nil
}
chain, err := s.buildForkchoiceChain(s.ctx, blk)
if err != nil {
log.WithError(err).Error("could not build forkchoice chain, starting with finalized block as head")
return nil
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.InsertChain(s.ctx, chain)
}
func (s *Service) buildForkchoiceChain(ctx context.Context, head interfaces.ReadOnlySignedBeaconBlock) ([]*forkchoicetypes.BlockAndCheckpoints, error) {
chain := []*forkchoicetypes.BlockAndCheckpoints{}
cp := s.FinalizedCheckpt()
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
jp := s.CurrentJustifiedCheckpt()
root, err := head.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not get head block root")
}
for {
roblock, err := blocks.NewROBlockWithRoot(head, root)
if err != nil {
return nil, err
}
// This chain sets the justified checkpoint for every block, including some that are older than jp.
// However, this should be safe for forkchoice at startup.
chain = append(chain, &forkchoicetypes.BlockAndCheckpoints{Block: roblock, JustifiedCheckpoint: jp, FinalizedCheckpoint: cp})
root = head.Block().ParentRoot()
if root == fRoot {
break
}
head, err = s.cfg.BeaconDB.Block(s.ctx, root)
if err != nil {
return nil, errors.Wrap(err, "could not get block")
}
if slots.ToEpoch(head.Block().Slot()) < cp.Epoch {
return nil, errors.New("head block is not a descendant of the finalized checkpoint")
}
}
return chain, nil
}
func (s *Service) setupForkchoiceRoot(st state.BeaconState) error {
cp := s.FinalizedCheckpt()
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint block")
}
roblock, err := blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
return errors.Wrap(err, "could not insert finalized block to forkchoice")
}
if !features.Get().EnableStartOptimistic {
lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get last validated checkpoint")
}
if bytes.Equal(fRoot[:], lastValidatedCheckpoint.Root) {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(s.ctx, fRoot); err != nil {
return errors.Wrap(err, "could not set finalized block as validated")
}
}
}
return nil
}
func (s *Service) setupForkchoiceCheckpoints() error {
justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
if justified == nil {
return errNilJustifiedCheckpoint
}
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
if finalized == nil {
return errNilFinalizedCheckpoint
}
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: fRoot}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
return nil
}

View File

@@ -19,10 +19,8 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"google.golang.org/protobuf/proto"
@@ -47,11 +45,6 @@ type mockBroadcaster struct {
broadcastCalled bool
}
type mockAccesser struct {
mockBroadcaster
p2pTesting.MockPeerManager
}
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
@@ -72,11 +65,6 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}
@@ -132,10 +120,8 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
WithDepositCache(dc),
WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
WithSyncChecker(mock.MockChecker{}),
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
WithP2PBroadcaster(&mockAccesser{}),
}
// append the variadic opts so they override the defaults by being processed afterwards
opts = append(defOpts, opts...)

View File

@@ -74,7 +74,6 @@ type ChainService struct {
BlockSlot primitives.Slot
SyncingRoot [32]byte
Blobs []blocks.VerifiedROBlob
DataColumns []blocks.VerifiedRODataColumn
TargetRoot [32]byte
}
@@ -703,17 +702,6 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
return nil
}
// ReceiveDataColumn implements the same method in chain service
func (c *ChainService) ReceiveDataColumn(dc blocks.VerifiedRODataColumn) error {
c.DataColumns = append(c.DataColumns, dc)
return nil
}
// ReceiveDataColumns implements the same method in chain service
func (*ChainService) ReceiveDataColumns(_ []blocks.VerifiedRODataColumn) error {
return nil
}
// TargetRootForEpoch mocks the same method in the chain service
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil

View File

@@ -230,17 +230,14 @@ func verifyBlobCommitmentCount(slot primitives.Slot, body interfaces.ReadOnlyBea
if body.Version() < version.Deneb {
return nil
}
kzgs, err := body.BlobKzgCommitments()
if err != nil {
return err
}
commitmentCount, maxBlobsPerBlock := len(kzgs), params.BeaconConfig().MaxBlobsPerBlock(slot)
if commitmentCount > maxBlobsPerBlock {
return fmt.Errorf("too many kzg commitments in block: actual count %d - max allowed %d", commitmentCount, maxBlobsPerBlock)
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if len(kzgs) > maxBlobsPerBlock {
return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
}
return nil
}

View File

@@ -926,10 +926,8 @@ func TestVerifyBlobCommitmentCount(t *testing.T) {
require.NoError(t, err)
require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
maxCommitmentsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())
b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, maxCommitmentsPerBlock+1)}}
b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1)}}
rb, err = consensusblocks.NewBeaconBlock(b)
require.NoError(t, err)
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: actual count %d - max allowed %d", maxCommitmentsPerBlock+1, maxCommitmentsPerBlock), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
}

View File

@@ -96,24 +96,6 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}
func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
currentEpoch := slots.ToEpoch(header.Header.Slot)
fork, err := forks.Fork(currentEpoch)
if err != nil {
return err
}
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return err
}
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
if err != nil {
return err
}
proposerPubKey := proposer.PublicKey
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.

View File

@@ -35,9 +35,6 @@ const (
// SingleAttReceived is sent after a single attestation object is received from gossip or rpc
SingleAttReceived = 9
// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
DataColumnSidecarReceived = 10
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -88,6 +85,3 @@ type AttesterSlashingReceivedData struct {
type SingleAttReceivedData struct {
Attestation ethpb.Att
}
type DataColumnSidecarReceivedData struct {
DataColumn *blocks.VerifiedRODataColumn
}

View File

@@ -69,10 +69,6 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
if err != nil {
return nil, err
}
depostiRequestsStartIndex, err := beaconState.DepositRequestsStartIndex()
if err != nil {
return nil, err
}
depositBalanceToConsume, err := beaconState.DepositBalanceToConsume()
if err != nil {
return nil, err
@@ -158,7 +154,7 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summaries,
DepositRequestsStartIndex: depostiRequestsStartIndex,
DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
DepositBalanceToConsume: depositBalanceToConsume,
ExitBalanceToConsume: exitBalanceToConsume,
EarliestExitEpoch: earliestExitEpoch,

View File

@@ -78,7 +78,6 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -265,7 +264,6 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
params.SetupTestConfigCleanup(t)
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)

View File

@@ -1,69 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"das_core.go",
"info.go",
"metrics.go",
"p2p_interface.go",
"peer_sampling.go",
"reconstruction.go",
"util.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/state:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"das_core_test.go",
"info_test.go",
"p2p_interface_test.go",
"peer_sampling_test.go",
"reconstruction_test.go",
"utils_test.go",
],
deps = [
":go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -1,375 +0,0 @@
package peerdas
import (
"encoding/binary"
"math"
"slices"
"time"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
beaconState "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
var (
// Custom errors
errCustodyGroupCountTooLarge = errors.New("custody group count too large")
errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")
// maxUint256 is the maximum value of a uint256.
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
)
type CustodyType int
const (
Target CustodyType = iota
Actual
)
// CustodyGroups computes the custody groups the node should participate in for custody.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#get_custody_groups
func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) (map[uint64]bool, error) {
numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
// Check if the custody group count is larger than the number of custody groups.
if custodyGroupCount > numberOfCustodyGroup {
return nil, errCustodyGroupCountTooLarge
}
custodyGroups := make(map[uint64]bool, custodyGroupCount)
one := uint256.NewInt(1)
for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(custodyGroups)) < custodyGroupCount; currentId.Add(currentId, one) {
// Convert to big endian bytes.
currentIdBytesBigEndian := currentId.Bytes32()
// Convert to little endian.
currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:])
// Hash the result.
hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)
// Get the custody group ID.
custodyGroupId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % numberOfCustodyGroup
// Add the custody group to the map.
custodyGroups[custodyGroupId] = true
// Overflow prevention.
if currentId.Cmp(maxUint256) == 0 {
currentId = uint256.NewInt(0)
}
}
// Final check.
if uint64(len(custodyGroups)) != custodyGroupCount {
return nil, errWrongComputedCustodyGroupCount
}
return custodyGroups, nil
}
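A minimal usage sketch of the CustodyGroups helper shown above (this diff removes the file, so treat it as illustrating the API as it stood, not code from the change set). The zero node ID and group count of 7 match TestInfo further below, so the printed groups should be 1, 17, 19, 42, 75, 87 and 102, in arbitrary map order:
package main
import (
    "fmt"
    "log"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
)
func main() {
    // The zero node ID, as used by TestInfo below.
    nodeID := enode.ID{}
    // Derive 7 custody groups deterministically from the node ID.
    groups, err := peerdas.CustodyGroups(nodeID, 7)
    if err != nil {
        log.Fatal(err)
    }
    for group := range groups {
        fmt.Println("custody group:", group)
    }
}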
// ComputeColumnsForCustodyGroup computes the columns for a given custody group.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#compute_columns_for_custody_group
func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
beaconConfig := params.BeaconConfig()
numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
if custodyGroup > numberOfCustodyGroup {
return nil, errCustodyGroupCountTooLarge
}
numberOfColumns := beaconConfig.NumberOfColumns
columnsPerGroup := numberOfColumns / numberOfCustodyGroup
columns := make([]uint64, 0, columnsPerGroup)
for i := range columnsPerGroup {
column := numberOfCustodyGroup*i + custodyGroup
columns = append(columns, column)
}
return columns, nil
}
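As a worked example: with 128 columns and 128 custody groups (the values these tests assume), columnsPerGroup is 1 and custody group 5 maps to the single column 5. If there were 64 custody groups instead, columnsPerGroup would be 2 and group 5 would map to columns 64*0 + 5 = 5 and 64*1 + 5 = 69.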
// DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#get_data_column_sidecars
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
start := time.Now()
if signedBlock == nil || len(cellsAndProofs) == 0 {
return nil, nil
}
block := signedBlock.Block()
blockBody := block.Body()
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}
if len(blobKzgCommitments) != len(cellsAndProofs) {
return nil, errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
}
signedBlockHeader, err := signedBlock.Header()
if err != nil {
return nil, errors.Wrap(err, "signed block header")
}
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
if err != nil {
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
}
blobsCount := len(cellsAndProofs)
sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
column := make([]kzg.Cell, 0, blobsCount)
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
cellsForRow := cellsAndProofs[rowIndex].Cells
proofsForRow := cellsAndProofs[rowIndex].Proofs
cell := cellsForRow[columnIndex]
column = append(column, cell)
kzgProof := proofsForRow[columnIndex]
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
}
columnBytes := make([][]byte, 0, blobsCount)
for i := range column {
columnBytes = append(columnBytes, column[i][:])
}
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
for _, kzgProof := range kzgProofOfColumn {
copiedProof := kzgProof
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
}
sidecar := &ethpb.DataColumnSidecar{
Index: columnIndex,
Column: columnBytes,
KzgCommitments: blobKzgCommitments,
KzgProofs: kzgProofOfColumnBytes,
SignedBlockHeader: signedBlockHeader,
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
}
sidecars = append(sidecars, sidecar)
}
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return sidecars, nil
}
// CustodyGroupSamplingSize returns the number of custody groups the node should sample from.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
func (custodyInfo *CustodyInfo) CustodyGroupSamplingSize(ct CustodyType) uint64 {
custodyGroupCount := custodyInfo.TargetGroupCount.Get()
if ct == Actual {
custodyGroupCount = custodyInfo.ActualGroupCount()
}
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
return max(samplesPerSlot, custodyGroupCount)
}
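The sampling size is floored at SamplesPerSlot, so with the value of 8 exercised by TestCustodyGroupSamplingSize further below, a target group count of 2 still samples 8 groups, while a target of 100 samples all 100.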
// CustodyColumns computes the custody columns from the custody groups.
func CustodyColumns(custodyGroups map[uint64]bool) (map[uint64]bool, error) {
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
custodyGroupCount := len(custodyGroups)
// Compute the columns for each custody group.
columns := make(map[uint64]bool, custodyGroupCount)
for group := range custodyGroups {
if group >= numberOfCustodyGroups {
return nil, errCustodyGroupCountTooLarge
}
groupColumns, err := ComputeColumnsForCustodyGroup(group)
if err != nil {
return nil, errors.Wrap(err, "compute columns for custody group")
}
for _, column := range groupColumns {
columns[column] = true
}
}
return columns, nil
}
// ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#validator-custody
func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
totalNodeBalance := uint64(0)
for index := range validatorsIndex {
balance, err := state.BalanceAtIndex(index)
if err != nil {
return 0, errors.Wrapf(err, "balance at index for validator index %v", index)
}
totalNodeBalance += balance
}
beaconConfig := params.BeaconConfig()
numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
validatorCustodyRequirement := beaconConfig.ValidatorCustodyRequirement
balancePerAdditionalCustodyGroup := beaconConfig.BalancePerAdditionalCustodyGroup
count := totalNodeBalance / balancePerAdditionalCustodyGroup
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroup), nil
}
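A worked example consistent with TestValidatorsCustodyRequirement further below, which assumes 32 ETH (32_000_000_000 Gwei) balances, a minimum requirement of 8 and a cap of 128: 100 validators contribute 3_200_000_000_000 Gwei, which divided by BalancePerAdditionalCustodyGroup gives 100, inside the clamp, so the result is 100; one validator gives 1, clamped up to 8; 1000 validators give 1000, clamped down to 128.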
// Blobs extracts blobs from `dataColumnsSidecar`.
// It can be seen as the inverse of DataColumnSidecars.
// `dataColumnsSidecar` must contain the data columns corresponding to the non-extended matrix,
// otherwise an error is returned.
// (`dataColumnsSidecar` may contain extra columns, but they are ignored.)
func Blobs(indices map[uint64]bool, dataColumnsSidecar []*ethpb.DataColumnSidecar) ([]*blocks.VerifiedROBlob, error) {
columnCount := fieldparams.NumberOfColumns
neededColumnCount := columnCount / 2
// Check if all needed columns are present.
sliceIndexFromColumnIndex := make(map[uint64]int, len(dataColumnsSidecar))
for i := range dataColumnsSidecar {
dataColumnSideCar := dataColumnsSidecar[i]
index := dataColumnSideCar.Index
if index < uint64(neededColumnCount) {
sliceIndexFromColumnIndex[index] = i
}
}
actualColumnCount := len(sliceIndexFromColumnIndex)
// Get missing columns.
if actualColumnCount < neededColumnCount {
missingColumns := make(map[int]bool, neededColumnCount-actualColumnCount)
for i := range neededColumnCount {
if _, ok := sliceIndexFromColumnIndex[uint64(i)]; !ok {
missingColumns[i] = true
}
}
missingColumnsSlice := make([]int, 0, len(missingColumns))
for i := range missingColumns {
missingColumnsSlice = append(missingColumnsSlice, i)
}
slices.Sort[[]int](missingColumnsSlice)
return nil, errors.Errorf("some columns are missing: %v", missingColumnsSlice)
}
// It is safe to retrieve the first column since we already checked that `dataColumnsSidecar` is not empty.
firstDataColumnSidecar := dataColumnsSidecar[0]
blobCount := uint64(len(firstDataColumnSidecar.Column))
// Check that all columns have the same length.
for i := range dataColumnsSidecar {
if uint64(len(dataColumnsSidecar[i].Column)) != blobCount {
return nil, errors.Errorf("mismatch in the length of the data columns, expected %d, got %d", blobCount, len(dataColumnsSidecar[i].Column))
}
}
// Reconstruct verified RO blobs from columns.
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
// Populate and filter indices.
indicesSlice := populateAndFilterIndices(indices, blobCount)
for _, blobIndex := range indicesSlice {
var blob kzg.Blob
// Compute the content of the blob.
for columnIndex := range neededColumnCount {
sliceIndex, ok := sliceIndexFromColumnIndex[uint64(columnIndex)]
if !ok {
return nil, errors.Errorf("missing column %d, this should never happen", columnIndex)
}
dataColumnSideCar := dataColumnsSidecar[sliceIndex]
cell := dataColumnSideCar.Column[blobIndex]
for i := 0; i < len(cell); i++ {
blob[columnIndex*kzg.BytesPerCell+i] = cell[i]
}
}
// Retrieve the blob KZG commitment.
blobKZGCommitment := kzg.Commitment(firstDataColumnSidecar.KzgCommitments[blobIndex])
// Compute the blob KZG proof.
blobKzgProof, err := kzg.ComputeBlobKZGProof(&blob, blobKZGCommitment)
if err != nil {
return nil, errors.Wrap(err, "compute blob KZG proof")
}
blobSidecar := &ethpb.BlobSidecar{
Index: blobIndex,
Blob: blob[:],
KzgCommitment: blobKZGCommitment[:],
KzgProof: blobKzgProof[:],
SignedBlockHeader: firstDataColumnSidecar.SignedBlockHeader,
CommitmentInclusionProof: firstDataColumnSidecar.KzgCommitmentsInclusionProof,
}
roBlob, err := blocks.NewROBlob(blobSidecar)
if err != nil {
return nil, errors.Wrap(err, "new RO blob")
}
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
}
return verifiedROBlobs, nil
}
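A compressed sketch of the roundtrip documented above; the full version is TestDataColumnsSidecarsBlobsRoundtrip further below, and signedBlock and cellsAndProofs are assumed to be prepared as in that test:
// Columns out of the block's blobs.
sidecars, err := peerdas.DataColumnSidecars(signedBlock, cellsAndProofs)
if err != nil {
    return err
}
// Blobs back out of the columns. An empty (non-nil) map selects every
// blob index; only the first NumberOfColumns/2 sidecars (the non-extended
// matrix) are consumed, and any extras are ignored.
blobs, err := peerdas.Blobs(map[uint64]bool{}, sidecars)
if err != nil {
    return err
}
_ = blobs // same content as the original blobs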
// populateAndFilterIndices returns a sorted slice of indices, setting all indices if none are provided
// and filtering out indices at or above the blob count.
func populateAndFilterIndices(indices map[uint64]bool, blobCount uint64) []uint64 {
// If no indices are provided, select every blob.
if len(indices) == 0 {
for i := range blobCount {
indices[i] = true
}
}
// Filter out indices at or above the blob count.
filteredIndices := make(map[uint64]bool, len(indices))
for i := range indices {
if i < blobCount {
filteredIndices[i] = true
}
}
// Transform set to slice.
indicesSlice := make([]uint64, 0, len(filteredIndices))
for i := range filteredIndices {
indicesSlice = append(indicesSlice, i)
}
// Sort the indices.
slices.Sort[[]uint64](indicesSlice)
return indicesSlice
}
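For example, with blobCount = 3: an empty map yields [0 1 2], while {1: true, 5: true} drops the out-of-range index 5 and yields [1]. Note that the map must be non-nil, since the populate step writes the default indices into it.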

View File

@@ -1,248 +0,0 @@
package peerdas_test
import (
"testing"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestDataColumnSidecars(t *testing.T) {
var expected []*ethpb.DataColumnSidecar = nil
actual, err := peerdas.DataColumnSidecars(nil, []kzg.CellsAndProofs{})
require.NoError(t, err)
require.DeepSSZEqual(t, expected, actual)
}
func TestBlobs(t *testing.T) {
blobsIndice := map[uint64]bool{}
almostAllColumns := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns/2)
for i := 2; i < fieldparams.NumberOfColumns/2+2; i++ {
almostAllColumns = append(almostAllColumns, &ethpb.DataColumnSidecar{
Index: uint64(i),
})
}
testCases := []struct {
name string
input []*ethpb.DataColumnSidecar
expected []*blocks.VerifiedROBlob
err error
}{
{
name: "empty input",
input: []*ethpb.DataColumnSidecar{},
expected: nil,
err: errors.New("some columns are missing: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63]"),
},
{
name: "missing columns",
input: almostAllColumns,
expected: nil,
err: errors.New("some columns are missing: [0 1]"),
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
actual, err := peerdas.Blobs(blobsIndice, tc.input)
if tc.err != nil {
require.Equal(t, tc.err.Error(), err.Error())
} else {
require.NoError(t, err)
}
require.DeepSSZEqual(t, tc.expected, actual)
})
}
}
func TestDataColumnsSidecarsBlobsRoundtrip(t *testing.T) {
const blobCount = 5
blobsIndex := map[uint64]bool{}
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
// Create a protobuf signed beacon block.
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
// Generate random blobs and their corresponding commitments and proofs.
blobs := make([]kzg.Blob, 0, blobCount)
blobKzgCommitments := make([]*kzg.Commitment, 0, blobCount)
blobKzgProofs := make([]*kzg.Proof, 0, blobCount)
for blobIndex := range blobCount {
// Create a random blob.
blob := getRandBlob(int64(blobIndex))
blobs = append(blobs, blob)
// Generate a blobKZGCommitment for the blob.
blobKZGCommitment, proof, err := generateCommitmentAndProof(&blob)
require.NoError(t, err)
blobKzgCommitments = append(blobKzgCommitments, blobKZGCommitment)
blobKzgProofs = append(blobKzgProofs, proof)
}
// Set the commitments into the block.
blobZkgCommitmentsBytes := make([][]byte, 0, blobCount)
for _, blobKZGCommitment := range blobKzgCommitments {
blobZkgCommitmentsBytes = append(blobZkgCommitmentsBytes, blobKZGCommitment[:])
}
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobZkgCommitmentsBytes
// Generate verified RO blobs.
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
commitmentInclusionProof, err := blocks.MerkleProofKZGCommitments(signedBeaconBlock.Block().Body())
require.NoError(t, err)
for blobIndex := range blobCount {
blob := blobs[blobIndex]
blobKZGCommitment := blobKzgCommitments[blobIndex]
blobKzgProof := blobKzgProofs[blobIndex]
// Get the signed beacon block header.
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
require.NoError(t, err)
blobSidecar := &ethpb.BlobSidecar{
Index: uint64(blobIndex),
Blob: blob[:],
KzgCommitment: blobKZGCommitment[:],
KzgProof: blobKzgProof[:],
SignedBlockHeader: signedBeaconBlockHeader,
CommitmentInclusionProof: commitmentInclusionProof,
}
roBlob, err := blocks.NewROBlob(blobSidecar)
require.NoError(t, err)
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
}
// Compute data columns sidecars from the signed beacon block and from the blobs.
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
dataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.NoError(t, err)
// Compute the blobs from the data columns sidecar.
roundtripBlobs, err := peerdas.Blobs(blobsIndex, dataColumnsSidecar)
require.NoError(t, err)
// Check that the blobs are the same.
require.DeepSSZEqual(t, verifiedROBlobs, roundtripBlobs)
}
func TestValidatorsCustodyRequirement(t *testing.T) {
testCases := []struct {
name string
count uint64
expected uint64
}{
{name: "0 validators", count: 0, expected: 8},
{name: "1 validator", count: 1, expected: 8},
{name: "8 validators", count: 8, expected: 8},
{name: "9 validators", count: 9, expected: 9},
{name: "100 validators", count: 100, expected: 100},
{name: "128 validators", count: 128, expected: 128},
{name: "129 validators", count: 129, expected: 128},
{name: "1000 validators", count: 1000, expected: 128},
}
const balance = uint64(32_000_000_000)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
balances := make([]uint64, 0, tc.count)
for range tc.count {
balances = append(balances, balance)
}
validatorsIndex := make(map[primitives.ValidatorIndex]bool)
for i := range tc.count {
validatorsIndex[primitives.ValidatorIndex(i)] = true
}
beaconState, err := state_native.InitializeFromProtoFulu(&ethpb.BeaconStateElectra{Balances: balances})
require.NoError(t, err)
actual, err := peerdas.ValidatorsCustodyRequirement(beaconState, validatorsIndex)
require.NoError(t, err)
require.Equal(t, tc.expected, actual)
})
}
}
func TestCustodyGroupSamplingSize(t *testing.T) {
testCases := []struct {
name string
custodyType peerdas.CustodyType
validatorsCustodyRequirement uint64
toAdvertiseCustodyGroupCount uint64
expected uint64
}{
{
name: "target, lower than samples per slot",
custodyType: peerdas.Target,
validatorsCustodyRequirement: 2,
expected: 8,
},
{
name: "target, higher than samples per slot",
custodyType: peerdas.Target,
validatorsCustodyRequirement: 100,
expected: 100,
},
{
name: "actual, lower than samples per slot",
custodyType: peerdas.Actual,
validatorsCustodyRequirement: 3,
toAdvertiseCustodyGroupCount: 4,
expected: 8,
},
{
name: "actual, higher than samples per slot",
custodyType: peerdas.Actual,
validatorsCustodyRequirement: 100,
toAdvertiseCustodyGroupCount: 101,
expected: 100,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Create a custody info.
custodyInfo := peerdas.CustodyInfo{}
// Set the validators custody requirement for target custody group count.
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
// Set the to advertise custody group count.
custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
// Compute the custody group sampling size.
actual := custodyInfo.CustodyGroupSamplingSize(tc.custodyType)
// Check the result.
require.Equal(t, tc.expected, actual)
})
}
}

View File

@@ -1,186 +0,0 @@
package peerdas
import (
"encoding/binary"
"sync"
"github.com/ethereum/go-ethereum/p2p/enode"
lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
)
// info contains all useful peerDAS-related information regarding a peer.
type (
info struct {
CustodyGroups map[uint64]bool
CustodyColumns map[uint64]bool
DataColumnsSubnets map[uint64]bool
}
targetCustodyGroupCount struct {
mut sync.RWMutex
validatorsCustodyRequirement uint64
}
toAdverstiseCustodyGroupCount struct {
mut sync.RWMutex
value uint64
}
CustodyInfo struct {
// Mut is a mutex to be used by callers to ensure that neither
// TargetGroupCount nor ToAdvertiseGroupCount is being modified.
// (Holding this mutex is not required for any data protection.)
Mut sync.RWMutex
// TargetGroupCount represents the target number of custody groups we should custody
// regarding the validators we are tracking.
TargetGroupCount targetCustodyGroupCount
// ToAdvertiseGroupCount represents the number of custody groups to advertise to the network.
ToAdvertiseGroupCount toAdverstiseCustodyGroupCount
}
)
const (
nodeInfoCacheSize = 200
nodeInfoCachKeySize = 32 + 8
)
var (
nodeInfoCacheMut sync.Mutex
nodeInfoCache *lru.Cache
)
// Info returns the peerDAS information for a given nodeID and custodyGroupCount.
// It returns a boolean indicating if the peer info was already in the cache and an error if any.
func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) {
// Create a new cache if it doesn't exist.
if err := createInfoCacheIfNeeded(); err != nil {
return nil, false, errors.Wrap(err, "create cache if needed")
}
// Compute the key.
key := computeInfoCacheKey(nodeID, custodyGroupCount)
// If the value is already in the cache, return it.
if value, ok := nodeInfoCache.Get(key); ok {
peerInfo, ok := value.(*info)
if !ok {
return nil, false, errors.New("failed to cast peer info (should never happen)")
}
return peerInfo, true, nil
}
// The peer info is not in the cache, compute it.
// Compute custody groups.
custodyGroups, err := CustodyGroups(nodeID, custodyGroupCount)
if err != nil {
return nil, false, errors.Wrap(err, "custody groups")
}
// Compute custody columns.
custodyColumns, err := CustodyColumns(custodyGroups)
if err != nil {
return nil, false, errors.Wrap(err, "custody columns")
}
// Compute data columns subnets.
dataColumnsSubnets := DataColumnSubnets(custodyColumns)
result := &info{
CustodyGroups: custodyGroups,
CustodyColumns: custodyColumns,
DataColumnsSubnets: dataColumnsSubnets,
}
// Add the result to the cache.
nodeInfoCache.Add(key, result)
return result, false, nil
}
// createInfoCacheIfNeeded creates a new cache if it doesn't exist.
func createInfoCacheIfNeeded() error {
nodeInfoCacheMut.Lock()
defer nodeInfoCacheMut.Unlock()
if nodeInfoCache == nil {
c, err := lru.New(nodeInfoCacheSize)
if err != nil {
return errors.Wrap(err, "lru new")
}
nodeInfoCache = c
}
return nil
}
// computeInfoCacheKey returns a unique key for a node and its custodyGroupCount.
func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCacheKeySize]byte {
var key [nodeInfoCacheKeySize]byte
copy(key[:32], nodeID[:])
binary.BigEndian.PutUint64(key[32:], custodyGroupCount)
return key
}
// SetValidatorsCustodyRequirement sets the validators custody requirement.
func (tcgc *targetCustodyGroupCount) SetValidatorsCustodyRequirement(value uint64) {
tcgc.mut.Lock()
defer tcgc.mut.Unlock()
tcgc.validatorsCustodyRequirement = value
}
// Get returns the target number of custody groups we should participate in for custody.
func (tcgc *targetCustodyGroupCount) Get() uint64 {
// If subscribed to all subnets, return the number of custody groups.
if flags.Get().SubscribeToAllSubnets {
return params.BeaconConfig().NumberOfCustodyGroups
}
tcgc.mut.RLock()
defer tcgc.mut.RUnlock()
// If no validators are tracked, return the default custody requirement.
if tcgc.validatorsCustodyRequirement == 0 {
return params.BeaconConfig().CustodyRequirement
}
// Return the validators custody requirement.
return tcgc.validatorsCustodyRequirement
}
// Set sets the to advertise custody group count.
func (tacgc *toAdverstiseCustodyGroupCount) Set(value uint64) {
tacgc.mut.Lock()
defer tacgc.mut.Unlock()
tacgc.value = value
}
// Get returns the to advertise custody group count.
func (tacgc *toAdverstiseCustodyGroupCount) Get() uint64 {
// If subscribed to all subnets, return the number of custody groups.
if flags.Get().SubscribeToAllSubnets {
return params.BeaconConfig().NumberOfCustodyGroups
}
custodyRequirement := params.BeaconConfig().CustodyRequirement
tacgc.mut.RLock()
defer tacgc.mut.RUnlock()
return max(tacgc.value, custodyRequirement)
}
// ActualGroupCount returns the actual custody group count.
func (custodyInfo *CustodyInfo) ActualGroupCount() uint64 {
return min(custodyInfo.TargetGroupCount.Get(), custodyInfo.ToAdvertiseGroupCount.Get())
}
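Taken together, the accessors above compose as actual = min(target, toAdvertise), where target falls back to CustodyRequirement when no validators are tracked and toAdvertise is floored at CustodyRequirement. A usage sketch with illustrative values (assuming the default CustodyRequirement of 4 and SubscribeToAllSubnets disabled):
var custodyInfo CustodyInfo
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(100) // many validators tracked
custodyInfo.ToAdvertiseGroupCount.Set(8)                          // advertised via the ENR
actual := custodyInfo.ActualGroupCount()                          // min(100, max(8, 4)) == 8
_ = actual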


@@ -1,133 +0,0 @@
package peerdas_test
import (
"testing"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestInfo(t *testing.T) {
nodeID := enode.ID{}
custodyGroupCount := uint64(7)
expectedCustodyGroup := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}
expectedCustodyColumns := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}
expectedDataColumnsSubnets := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}
for _, cached := range []bool{false, true} {
actual, ok, err := peerdas.Info(nodeID, custodyGroupCount)
require.NoError(t, err)
require.Equal(t, cached, ok)
require.DeepEqual(t, expectedCustodyGroup, actual.CustodyGroups)
require.DeepEqual(t, expectedCustodyColumns, actual.CustodyColumns)
require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
}
}
func TestTargetCustodyGroupCount(t *testing.T) {
testCases := []struct {
name string
subscribeToAllSubnets bool
validatorsCustodyRequirement uint64
expected uint64
}{
{
name: "subscribed to all subnets",
subscribeToAllSubnets: true,
validatorsCustodyRequirement: 100,
expected: 128,
},
{
name: "no validators attached",
subscribeToAllSubnets: false,
validatorsCustodyRequirement: 0,
expected: 4,
},
{
name: "some validators attached",
subscribeToAllSubnets: false,
validatorsCustodyRequirement: 100,
expected: 100,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Subscribe to all subnets if needed.
if tc.subscribeToAllSubnets {
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SubscribeToAllSubnets = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
}
var custodyInfo peerdas.CustodyInfo
// Set the validators custody requirement.
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
// Get the target custody group count.
actual := custodyInfo.TargetGroupCount.Get()
// Compare the expected and actual values.
require.Equal(t, tc.expected, actual)
})
}
}
func TestToAdvertiseCustodyGroupCount(t *testing.T) {
testCases := []struct {
name string
subscribeToAllSubnets bool
toAdvertiseCustodyGroupCount uint64
expected uint64
}{
{
name: "subscribed to all subnets",
subscribeToAllSubnets: true,
toAdvertiseCustodyGroupCount: 100,
expected: 128,
},
{
name: "higher than custody requirement",
subscribeToAllSubnets: false,
toAdvertiseCustodyGroupCount: 100,
expected: 100,
},
{
name: "lower than custody requirement",
subscribeToAllSubnets: false,
toAdvertiseCustodyGroupCount: 1,
expected: 4,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Subscribe to all subnets if needed.
if tc.subscribeToAllSubnets {
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SubscribeToAllSubnets = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
}
// Create a custody info.
var custodyInfo peerdas.CustodyInfo
// Set the to advertise custody group count.
custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
// Get the to advertise custody group count.
actual := custodyInfo.ToAdvertiseGroupCount.Get()
// Compare the expected and actual values.
require.Equal(t, tc.expected, actual)
})
}
}


@@ -1,14 +0,0 @@
package peerdas
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var dataColumnComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_data_column_sidecar_computation_milliseconds",
Help: "Captures the time taken to compute data column sidecars from blobs.",
Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
},
)


@@ -1,126 +0,0 @@
package peerdas
import (
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)
const (
CustodyGroupCountEnrKey = "cgc"
)
var (
// Custom errors
errRecordNil = errors.New("record is nil")
errCannotLoadCustodyGroupCount = errors.New("cannot load the custody group count from peer")
errIndexTooLarge = errors.New("column index is larger than the specified columns count")
errMismatchLength = errors.New("mismatch in the length of the commitments and proofs")
)
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#the-discovery-domain-discv5
type Cgc uint64
func (Cgc) ENRKey() string { return CustodyGroupCountEnrKey }
// VerifyDataColumnsSidecarKZGProofs verifies the provided KZG Proofs of data columns.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) (bool, error) {
// Retrieve the number of columns.
numberOfColumns := params.BeaconConfig().NumberOfColumns
// Compute the total count.
count := 0
for _, sidecar := range sidecars {
count += len(sidecar.Column)
}
commitments := make([]kzg.Bytes48, 0, count)
indices := make([]uint64, 0, count)
cells := make([]kzg.Cell, 0, count)
proofs := make([]kzg.Bytes48, 0, count)
for _, sidecar := range sidecars {
// Check that the column index is not too large.
if sidecar.Index >= numberOfColumns {
return false, errIndexTooLarge
}
// Check if the KZG commitments size and data column size match.
if len(sidecar.Column) != len(sidecar.KzgCommitments) {
return false, errMismatchLength
}
// Check if the KZG proofs size and data column size match.
if len(sidecar.Column) != len(sidecar.KzgProofs) {
return false, errMismatchLength
}
for i := range sidecar.Column {
commitments = append(commitments, kzg.Bytes48(sidecar.KzgCommitments[i]))
indices = append(indices, sidecar.Index)
cells = append(cells, kzg.Cell(sidecar.Column[i]))
proofs = append(proofs, kzg.Bytes48(sidecar.KzgProofs[i]))
}
}
// Verify the whole batch at once.
verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
if err != nil {
return false, errors.Wrap(err, "verify cell KZG proof batch")
}
return verified, nil
}
// ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
return columnIndex % dataColumnSidecarSubnetCount
}
// DataColumnSubnets computes the subnets for the data columns.
func DataColumnSubnets(dataColumns map[uint64]bool) map[uint64]bool {
subnets := make(map[uint64]bool, len(dataColumns))
for column := range dataColumns {
subnet := ComputeSubnetForDataColumnSidecar(column)
subnets[subnet] = true
}
return subnets
}
// ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the inverse of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
beaconConfig := params.BeaconConfig()
numberOfColumns := beaconConfig.NumberOfColumns
if columnIndex >= numberOfColumns {
return 0, errIndexTooLarge
}
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
columnsPerGroup := numberOfColumns / numberOfCustodyGroups
return columnIndex / columnsPerGroup, nil
}
// CustodyGroupCountFromRecord extracts the custody group count from an ENR record.
func CustodyGroupCountFromRecord(record *enr.Record) (uint64, error) {
if record == nil {
return 0, errRecordNil
}
// Load the `cgc` entry.
var cgc Cgc
if err := record.Load(&cgc); err != nil {
return 0, errCannotLoadCustodyGroupCount
}
return uint64(cgc), nil
}
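A quick illustration of the index arithmetic above, assuming the 128-column, 128-subnet, 128-custody-group configuration used by the tests in this package (so columnsPerGroup == 1):
subnet := ComputeSubnetForDataColumnSidecar(42) // 42 % 128 == 42
group, err := ComputeCustodyGroupForColumn(42)  // 42 / 1 == 42, err == nil
_, _, _ = subnet, group, err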


@@ -1,57 +0,0 @@
package peerdas_test
import (
"fmt"
"testing"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
dbBlock := util.NewBeaconBlockDeneb()
require.NoError(t, kzg.Start())
var (
comms [][]byte
blobs []kzg.Blob
)
for i := int64(0); i < 6; i++ {
blob := getRandBlob(i)
commitment, _, err := generateCommitmentAndProof(&blob)
require.NoError(t, err)
comms = append(comms, commitment[:])
blobs = append(blobs, blob)
}
dbBlock.Block.Body.BlobKzgCommitments = comms
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
require.NoError(t, err)
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
sCars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
require.NoError(t, err)
for i, sidecar := range sCars {
roCol, err := blocks.NewRODataColumn(sidecar)
require.NoError(t, err)
verified, err := peerdas.VerifyDataColumnsSidecarKZGProofs([]blocks.RODataColumn{roCol})
require.NoError(t, err)
require.Equal(t, true, verified, fmt.Sprintf("sidecar %d failed", i))
}
}
func TestCustodyGroupCountFromRecord(t *testing.T) {
const expected uint64 = 7
// Create an Ethereum record.
record := &enr.Record{}
record.Set(peerdas.Cgc(expected))
actual, err := peerdas.CustodyGroupCountFromRecord(record)
require.NoError(t, err)
require.Equal(t, expected, actual)
}


@@ -1,56 +0,0 @@
package peerdas
import (
"math/big"
"github.com/prysmaticlabs/prysm/v5/config/params"
)
// ExtendedSampleCount computes, for a given number of samples per slot and a number of allowed failures,
// the number of samples we should actually query from peers.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/peer-sampling.md#get_extended_sample_count
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
// Retrieve the columns count
columnsCount := params.BeaconConfig().NumberOfColumns
// If half of the columns are missing, we can still reconstruct the data.
// If half of the columns + 1 are missing, we cannot.
// This is therefore the smallest unrecoverable case.
worstCaseMissing := columnsCount/2 + 1
// Compute the false positive threshold.
falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)
var sampleCount uint64
// Finally, compute the extended sample count.
for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
break
}
}
return sampleCount
}
// HypergeomCDF computes the hypergeometric cumulative distribution function.
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
func HypergeomCDF(k, M, n, N uint64) float64 {
denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
denominator := new(big.Float).SetInt(denominatorInt)
rBig := big.NewFloat(0)
for i := uint64(0); i < k+1; i++ {
a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
b := new(big.Int).Binomial(int64(M-n), int64(N-i))
numeratorInt := new(big.Int).Mul(a, b)
numerator := new(big.Float).SetInt(numeratorInt)
item := new(big.Float).Quo(numerator, denominator)
rBig.Add(rBig, item)
}
r, _ := rBig.Float64()
return r
}
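A worked instance of the loop above, using the 128-column configuration exercised by the tests below: the smallest unrecoverable case leaves 128/2 + 1 == 65 columns missing, and with 16 samples per slot and 4 allowed failures the loop settles on 29 samples (the values come from the test table that follows):
threshold := HypergeomCDF(0, 128, 65, 16) // P(16 samples all land on available columns)
// ExtendedSampleCount(16, 4) returns the smallest count >= 16 such that
// HypergeomCDF(4, 128, 65, count) <= threshold, namely 29.
_ = threshold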


@@ -1,60 +0,0 @@
package peerdas_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestExtendedSampleCount(t *testing.T) {
const samplesPerSlot = 16
testCases := []struct {
name string
allowedMissings uint64
extendedSampleCount uint64
}{
{name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
{name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
{name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
{name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
{name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
{name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
{name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
{name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
{name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
{name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
{name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
{name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
{name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
{name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
{name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
{name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
{name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
{name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
{name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
{name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
{name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
require.Equal(t, tc.extendedSampleCount, result)
})
}
}
func TestHypergeomCDF(t *testing.T) {
// Parameters: population size M=128, successes in population n=65, sample size N=16, successes in sample k=5.
// Expected result: ~0.0796665913283742 (see https://en.wikipedia.org/wiki/Hypergeometric_distribution)
const (
expected = 0.0796665913283742
margin = 0.000001
)
actual := peerdas.HypergeomCDF(5, 128, 65, 16)
require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
}


@@ -1,147 +0,0 @@
package peerdas
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"golang.org/x/sync/errgroup"
)
// CanSelfReconstruct returns true if the node can self-reconstruct all the data columns from its custody group count.
func CanSelfReconstruct(custodyGroupCount uint64) bool {
total := params.BeaconConfig().NumberOfCustodyGroups
// If total is odd, then we need total / 2 + 1 groups to reconstruct.
// If total is even, then we need total / 2 groups to reconstruct.
custodyGroupsNeeded := total/2 + total%2
return custodyGroupCount >= custodyGroupsNeeded
}
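// For example: with the default total of 128 custody groups a node needs
// 128/2 == 64 groups to self-reconstruct; with an odd total of 65 it needs
// 65/2 + 1 == 33 (matching the test cases below).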
// RecoverCellsAndProofs recovers the cells and proofs from the data column sidecars.
func RecoverCellsAndProofs(
dataColumnSideCars []*ethpb.DataColumnSidecar,
blockRoot [fieldparams.RootLength]byte,
) ([]kzg.CellsAndProofs, error) {
var wg errgroup.Group
dataColumnSideCarsCount := len(dataColumnSideCars)
if dataColumnSideCarsCount == 0 {
return nil, errors.New("no data column sidecars")
}
// Check if all columns have the same length.
blobCount := len(dataColumnSideCars[0].Column)
for _, sidecar := range dataColumnSideCars {
length := len(sidecar.Column)
if length != blobCount {
return nil, errors.New("columns do not have the same length")
}
}
// Recover cells and compute proofs in parallel.
recoveredCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
for blobIndex := 0; blobIndex < blobCount; blobIndex++ {
bIndex := blobIndex
wg.Go(func() error {
cellsIndices := make([]uint64, 0, dataColumnSideCarsCount)
cells := make([]kzg.Cell, 0, dataColumnSideCarsCount)
for _, sidecar := range dataColumnSideCars {
// Build the cell indices.
cellsIndices = append(cellsIndices, sidecar.Index)
// Get the cell.
column := sidecar.Column
cell := column[bIndex]
cells = append(cells, kzg.Cell(cell))
}
// Recover the cells and proofs for the corresponding blob
cellsAndProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
if err != nil {
return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", bIndex)
}
recoveredCellsAndProofs[bIndex] = cellsAndProofs
return nil
})
}
if err := wg.Wait(); err != nil {
return nil, err
}
return recoveredCellsAndProofs, nil
}
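// Note (assumption based on the cell recovery scheme): recovery only succeeds
// when at least half of the extended cells of each blob are supplied, which is
// why callers typically check CanSelfReconstruct before attempting it.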
// DataColumnSidecarsForReconstruct is a TEMPORARY function until there is an official specification for it.
// It is scheduled for deletion.
func DataColumnSidecarsForReconstruct(
blobKzgCommitments [][]byte,
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
kzgCommitmentsInclusionProof [][]byte,
cellsAndProofs []kzg.CellsAndProofs,
) ([]*ethpb.DataColumnSidecar, error) {
// Each CellsAndProofs entry corresponds to a blob,
// so the blob count equals the length of cellsAndProofs.
blobsCount := len(cellsAndProofs)
if blobsCount == 0 {
return nil, nil
}
// Get the column sidecars.
sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
for columnIndex := range fieldparams.NumberOfColumns {
column := make([]kzg.Cell, 0, blobsCount)
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
for rowIndex := range blobsCount {
cellsForRow := cellsAndProofs[rowIndex].Cells
proofsForRow := cellsAndProofs[rowIndex].Proofs
if len(cellsForRow) != fieldparams.NumberOfColumns {
return nil, errors.Errorf("cells don't have the expected size: expected %d - actual %d", fieldparams.NumberOfColumns, len(cellsForRow))
}
cell := cellsForRow[columnIndex]
column = append(column, cell)
if len(proofsForRow) != fieldparams.NumberOfColumns {
return nil, errors.Errorf("proofs don't have the expected size: expected %d - actual %d", fieldparams.NumberOfColumns, len(proofsForRow))
}
kzgProof := proofsForRow[columnIndex]
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
}
columnBytes := make([][]byte, 0, blobsCount)
for i := range column {
columnBytes = append(columnBytes, column[i][:])
}
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
for _, kzgProof := range kzgProofOfColumn {
copiedProof := kzgProof
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
}
sidecar := &ethpb.DataColumnSidecar{
Index: uint64(columnIndex),
Column: columnBytes,
KzgCommitments: blobKzgCommitments,
KzgProofs: kzgProofOfColumnBytes,
SignedBlockHeader: signedBlockHeader,
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
}
sidecars = append(sidecars, sidecar)
}
return sidecars, nil
}


@@ -1,209 +0,0 @@
package peerdas_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestCanSelfReconstruct(t *testing.T) {
testCases := []struct {
name string
totalNumberOfCustodyGroups uint64
custodyNumberOfGroups uint64
expected bool
}{
{
name: "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=31",
totalNumberOfCustodyGroups: 64,
custodyNumberOfGroups: 31,
expected: false,
},
{
name: "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=32",
totalNumberOfCustodyGroups: 64,
custodyNumberOfGroups: 32,
expected: true,
},
{
name: "totalNumberOfCustodyGroups=65, custodyNumberOfGroups=32",
totalNumberOfCustodyGroups: 65,
custodyNumberOfGroups: 32,
expected: false,
},
{
name: "totalNumberOfCustodyGroups=63, custodyNumberOfGroups=33",
totalNumberOfCustodyGroups: 65,
custodyNumberOfGroups: 33,
expected: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Set the total number of columns.
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.NumberOfCustodyGroups = tc.totalNumberOfCustodyGroups
params.OverrideBeaconConfig(cfg)
// Check if reconstruction is possible.
actual := peerdas.CanSelfReconstruct(tc.custodyNumberOfGroups)
require.Equal(t, tc.expected, actual)
})
}
}
func TestReconstructionRoundTrip(t *testing.T) {
params.SetupTestConfigCleanup(t)
const blobCount = 5
var blockRoot [fieldparams.RootLength]byte
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
require.NoError(t, kzg.Start())
// Generate random blobs and their corresponding commitments.
var (
blobsKzgCommitments [][]byte
blobs []kzg.Blob
)
for i := range blobCount {
blob := getRandBlob(int64(i))
commitment, _, err := generateCommitmentAndProof(&blob)
require.NoError(t, err)
blobsKzgCommitments = append(blobsKzgCommitments, commitment[:])
blobs = append(blobs, blob)
}
// Generate a signed beacon block.
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobsKzgCommitments
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
// Get the signed beacon block header.
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
require.NoError(t, err)
// Build data column sidecars from the signed block and blobs.
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
dataColumnSidecars, err := peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.NoError(t, err)
// Create verified RO data columns.
verifiedRoDataColumns := make([]*blocks.VerifiedRODataColumn, 0, blobCount)
for _, dataColumnSidecar := range dataColumnSidecars {
roDataColumn, err := blocks.NewRODataColumn(dataColumnSidecar)
require.NoError(t, err)
verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
verifiedRoDataColumns = append(verifiedRoDataColumns, &verifiedRoDataColumn)
}
verifiedRoDataColumn := verifiedRoDataColumns[0]
numberOfColumns := params.BeaconConfig().NumberOfColumns
var noDataColumns []*ethpb.DataColumnSidecar
dataColumnsWithDifferentLengths := []*ethpb.DataColumnSidecar{
{Column: [][]byte{{}, {}}},
{Column: [][]byte{{}}},
}
notEnoughDataColumns := dataColumnSidecars[:numberOfColumns/2-1]
originalDataColumns := dataColumnSidecars[:numberOfColumns/2]
extendedDataColumns := dataColumnSidecars[numberOfColumns/2:]
evenDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
oddDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
allDataColumns := dataColumnSidecars
for i, dataColumn := range dataColumnSidecars {
if i%2 == 0 {
evenDataColumns = append(evenDataColumns, dataColumn)
} else {
oddDataColumns = append(oddDataColumns, dataColumn)
}
}
testCases := []struct {
name string
dataColumnsSidecar []*ethpb.DataColumnSidecar
isError bool
}{
{
name: "No data columns sidecars",
dataColumnsSidecar: noDataColumns,
isError: true,
},
{
name: "Data columns sidecar with different lengths",
dataColumnsSidecar: dataColumnsWithDifferentLengths,
isError: true,
},
{
name: "All columns are present (no actual need to reconstruct)",
dataColumnsSidecar: allDataColumns,
isError: false,
},
{
name: "Only original columns are present",
dataColumnsSidecar: originalDataColumns,
isError: false,
},
{
name: "Only extended columns are present",
dataColumnsSidecar: extendedDataColumns,
isError: false,
},
{
name: "Only even columns are present",
dataColumnsSidecar: evenDataColumns,
isError: false,
},
{
name: "Only odd columns are present",
dataColumnsSidecar: oddDataColumns,
isError: false,
},
{
name: "Not enough columns to reconstruct",
dataColumnsSidecar: notEnoughDataColumns,
isError: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Recover cells and proofs from available data columns sidecars.
cellsAndProofs, err := peerdas.RecoverCellsAndProofs(tc.dataColumnsSidecar, blockRoot)
isError := (err != nil)
require.Equal(t, tc.isError, isError)
if isError {
return
}
// Recover all data columns sidecars from cells and proofs.
reconstructedDataColumnsSideCars, err := peerdas.DataColumnSidecarsForReconstruct(
blobsKzgCommitments,
signedBeaconBlockHeader,
verifiedRoDataColumn.KzgCommitmentsInclusionProof,
cellsAndProofs,
)
require.NoError(t, err)
expected := dataColumnSidecars
actual := reconstructedDataColumnsSideCars
require.DeepSSZEqual(t, expected, actual)
})
}
}


@@ -1,57 +0,0 @@
package peerdas
import (
"fmt"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
// ConstructDataColumnSidecars constructs data column sidecars from a block, blobs and their cell proofs.
// This is a convenience method as blob and cell proofs are common inputs.
func ConstructDataColumnSidecars(block interfaces.SignedBeaconBlock, blobs [][]byte, cellProofs [][]byte) ([]*ethpb.DataColumnSidecar, error) {
// Check if the block is at least a Fulu block.
if block.Version() < version.Fulu {
return nil, nil
}
cellsAndProofs, err := constructCellsAndProofs(blobs, cellProofs)
if err != nil {
return nil, err
}
return DataColumnSidecars(block, cellsAndProofs)
}
func constructCellsAndProofs(blobs [][]byte, cellProofs [][]byte) ([]kzg.CellsAndProofs, error) {
numColumns := int(params.BeaconConfig().NumberOfColumns)
if len(blobs)*numColumns != len(cellProofs) {
return nil, fmt.Errorf("number of blobs and cell proofs do not match: %d * %d != %d", len(blobs), numColumns, len(cellProofs))
}
cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobs))
for i, blob := range blobs {
var b kzg.Blob
copy(b[:], blob)
cells, err := kzg.ComputeCells(&b)
if err != nil {
return nil, err
}
var proofs []kzg.Proof
for idx := i * numColumns; idx < (i+1)*numColumns; idx++ {
proofs = append(proofs, kzg.Proof(cellProofs[idx]))
}
cellsAndProofs = append(cellsAndProofs, kzg.CellsAndProofs{
Cells: cells,
Proofs: proofs,
})
}
return cellsAndProofs, nil
}
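The indexing above implies a blob-major layout: the proof for (blob b, column c) is expected at cellProofs[b*numColumns+c]. A hedged call sketch, with block, blobs and cellProofs assumed to be supplied by the caller:
sidecars, err := ConstructDataColumnSidecars(block, blobs, cellProofs)
if err != nil {
    // A mismatched blob/cell-proof count surfaces here.
}
_ = sidecars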


@@ -1,57 +0,0 @@
package peerdas_test
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
GoKZG "github.com/crate-crypto/go-kzg-4844"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/sirupsen/logrus"
)
func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
commitment, err := kzg.BlobToKZGCommitment(blob)
if err != nil {
return nil, nil, err
}
proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
if err != nil {
return nil, nil, err
}
return &commitment, &proof, err
}
// Returns a random blob using the passed seed as entropy
func getRandBlob(seed int64) kzg.Blob {
var blob kzg.Blob
bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
fieldElementBytes := getRandFieldElement(seed + int64(i))
copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
}
return blob
}
// Returns a serialized random field element in big-endian
func getRandFieldElement(seed int64) [32]byte {
bytes := deterministicRandomness(seed)
var r fr.Element
r.SetBytes(bytes[:])
return GoKZG.SerializeScalar(r)
}
func deterministicRandomness(seed int64) [32]byte {
// Converts an int64 to a byte slice
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.BigEndian, seed)
if err != nil {
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
return [32]byte{}
}
bytes := buf.Bytes()
return sha256.Sum256(bytes)
}


@@ -53,11 +53,6 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
}
- // PeerDASIsActive checks whether peerDAS is active at the provided slot.
- func PeerDASIsActive(slot primitives.Slot) bool {
- return params.FuluEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().FuluForkEpoch
- }
// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH


@@ -4,26 +4,21 @@ go_library(
name = "go_default_library",
srcs = [
"availability.go",
"availability_columns.go",
"cache.go",
"data_column_cache.go",
"iface.go",
"mock.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/das",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//runtime/logging:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
@@ -32,18 +27,13 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"availability_columns_test.go",
"availability_test.go",
"cache_test.go",
"data_column_cache_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -51,7 +41,6 @@ go_test(
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)


@@ -53,16 +53,10 @@ func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchV
// Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
- func (s *LazilyPersistentStore) Persist(current primitives.Slot, scg ...blocks.ROSidecar) error {
- if len(scg) == 0 {
+ func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
+ if len(sc) == 0 {
return nil
}
- sc, err := blocks.BlobSidecarsFromSidecars(scg)
- if err != nil {
- return errors.Wrap(err, "blob sidecars from sidecars")
- }
if len(sc) > 1 {
first := sc[0].BlockRoot()
for i := 1; i < len(sc); i++ {


@@ -1,181 +0,0 @@
package das
import (
"context"
"github.com/ethereum/go-ethereum/p2p/enode"
errors "github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to Persist until the IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
store *filesystem.DataColumnStorage
nodeID enode.ID
cache *dataColumnCache
custodyInfo *peerdas.CustodyInfo
}
func NewLazilyPersistentStoreColumn(store *filesystem.DataColumnStorage, nodeID enode.ID, custodyInfo *peerdas.CustodyInfo) *LazilyPersistentStoreColumn {
return &LazilyPersistentStoreColumn{
store: store,
nodeID: nodeID,
cache: newDataColumnCache(),
custodyInfo: custodyInfo,
}
}
// Persist adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
if len(sidecars) == 0 {
return nil
}
dataColumnSidecars, err := blocks.DataColumnSidecarsFromSidecars(sidecars)
if err != nil {
return errors.Wrap(err, "blob sidecars from sidecars")
}
// It is safe to retrieve the first sidecar.
firstSidecar := dataColumnSidecars[0]
if len(sidecars) > 1 {
firstRoot := firstSidecar.BlockRoot()
for _, sidecar := range dataColumnSidecars[1:] {
if sidecar.BlockRoot() != firstRoot {
return errMixedRoots
}
}
}
firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
return nil
}
key := dataColumnCacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
entry := s.cache.ensure(key)
for i := range sidecars {
if err := entry.stash(&dataColumnSidecars[i]); err != nil {
return errors.Wrap(err, "stash DataColumnSidecar")
}
}
return nil
}
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(
ctx context.Context,
currentSlot primitives.Slot,
block blocks.ROBlock,
) error {
blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
if err != nil {
return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
}
// Return early for blocks that do not have any commitments.
if blockCommitments.count() == 0 {
return nil
}
// Get the root of the block.
blockRoot := block.Root()
// Build the cache key for the block.
key := dataColumnCacheKey{slot: block.Block().Slot(), root: blockRoot}
// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
entry := s.cache.ensure(key)
// Delete the cache entry for the block at the end.
defer s.cache.delete(key)
// Set the disk summary for the block in the cache entry.
entry.setDiskSummary(s.store.Summary(blockRoot))
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.
roDataColumns, err := entry.filter(blockRoot, blockCommitments)
if err != nil {
return errors.Wrap(err, "incomplete DataColumnSidecar batch")
}
// Create verified RO data columns from RO data columns.
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumns))
for _, roDataColumn := range roDataColumns {
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
}
// Ensure that column sidecars are written to disk.
if err := s.store.Save(verifiedRODataColumns); err != nil {
return errors.Wrapf(err, "save data column sidecars")
}
// All ColumnSidecars are persisted - data availability check succeeds.
return nil
}
// fullCommitmentsToCheck returns the commitments to check for a given block.
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
// Return early for blocks that are pre-Fulu.
if block.Version() < version.Fulu {
return &safeCommitmentsArray{}, nil
}
// Compute the block epoch.
blockSlot := block.Block().Slot()
blockEpoch := slots.ToEpoch(blockSlot)
// Compute the current epoch.
currentEpoch := slots.ToEpoch(currentSlot)
// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return &safeCommitmentsArray{}, nil
}
// Retrieve the KZG commitments for the block.
kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}
// Return early if there are no commitments in the block.
if len(kzgCommitments) == 0 {
return &safeCommitmentsArray{}, nil
}
// Retrieve the groups count.
custodyGroupCount := s.custodyInfo.ActualGroupCount()
// Retrieve peer info.
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
if err != nil {
return nil, errors.Wrap(err, "peer info")
}
// Create a safe commitments array for the custody columns.
commitmentsArray := &safeCommitmentsArray{}
for column := range peerInfo.CustodyColumns {
commitmentsArray[column] = kzgCommitments
}
return commitmentsArray, nil
}
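A minimal sketch of the store's intended flow, mirroring the tests that follow (dataColumnStorage, nodeID, sidecars, block and ctx are placeholders):
store := NewLazilyPersistentStoreColumn(dataColumnStorage, nodeID, &peerdas.CustodyInfo{})
if err := store.Persist(currentSlot, sidecars...); err != nil {
    // Mixed roots or a failed stash surface here.
}
// Later, once the block is known:
if err := store.IsDataAvailable(ctx, currentSlot, block); err != nil {
    // Some custodied columns are still missing.
}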


@@ -1,254 +0,0 @@
package das
import (
"context"
"testing"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, dataColumnParamsByBlockRoot verification.DataColumnsParamsByRoot) ([]blocks.ROSidecar, []blocks.RODataColumn) {
roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
for _, roDataColumn := range roDataColumns {
roSidecars = append(roSidecars, blocks.NewSidecarFromDataColumnSidecar(roDataColumn))
}
return roSidecars, roDataColumns
}
func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
}
var commitments = [][]byte{
bytesutil.PadTo([]byte("a"), 48),
bytesutil.PadTo([]byte("b"), 48),
bytesutil.PadTo([]byte("c"), 48),
bytesutil.PadTo([]byte("d"), 48),
}
func TestPersist(t *testing.T) {
t.Run("no sidecars", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
err := lazilyPersistentStoreColumns.Persist(0)
require.NoError(t, err)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})
t.Run("mixed roots", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]verification.DataColumnParams{
{1}: {{ColumnIndex: 1}},
{2}: {{ColumnIndex: 2}},
}
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
require.ErrorIs(t, err, errMixedRoots)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})
t.Run("outside DA period", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]verification.DataColumnParams{
{1}: {{ColumnIndex: 1}},
}
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
require.NoError(t, err)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})
t.Run("nominal", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]verification.DataColumnParams{
{}: {{ColumnIndex: 1}, {ColumnIndex: 5}},
}
roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
require.NoError(t, err)
require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))
key := dataColumnCacheKey{slot: 0, root: [32]byte{}}
entry := lazilyPersistentStoreColumns.cache.entries[key]
// A call to Persist does NOT save the sidecars to disk.
require.Equal(t, uint64(0), entry.diskSummary.Count())
require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])
for i, roDataColumn := range entry.scs {
if map[int]bool{1: true, 5: true}[i] {
continue
}
require.IsNil(t, roDataColumn)
}
})
}
func TestIsDataAvailable(t *testing.T) {
t.Run("No commitments", func(t *testing.T) {
ctx := context.Background()
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
require.NoError(t, err)
})
t.Run("Some sidecars are not available", func(t *testing.T) {
ctx := context.Background()
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
require.NotNil(t, err)
})
t.Run("All sidecars are available", func(t *testing.T) {
ctx := context.Background()
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
root := signedRoBlock.Root()
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
indices := [...]uint64{1, 17, 87, 102}
dataColumnsParams := make([]verification.DataColumnParams, 0, len(indices))
for _, index := range indices {
dataColumnParams := verification.DataColumnParams{
ColumnIndex: index,
KzgCommitments: commitments,
}
dataColumnsParams = append(dataColumnsParams, dataColumnParams)
}
dataColumnsParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: dataColumnsParams}
_, verifiedRoDataColumns := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParamsByBlockRoot)
key := dataColumnCacheKey{root: root}
entry := lazilyPersistentStoreColumns.cache.ensure(key)
defer lazilyPersistentStoreColumns.cache.delete(key)
for _, verifiedRoDataColumn := range verifiedRoDataColumns {
err := entry.stash(&verifiedRoDataColumn.RODataColumn)
require.NoError(t, err)
}
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
require.NoError(t, err)
actual, err := dataColumnStorage.Get(root, indices[:])
require.NoError(t, err)
summary := dataColumnStorage.Summary(root)
require.Equal(t, uint64(len(indices)), summary.Count())
require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
})
}
func TestFullCommitmentsToCheck(t *testing.T) {
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
require.NoError(t, err)
testCases := []struct {
name string
commitments [][]byte
block func(*testing.T) blocks.ROBlock
slot primitives.Slot
}{
{
name: "Pre-Fulu block",
block: func(t *testing.T) blocks.ROBlock {
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
},
},
{
name: "Commitments outside data availability window",
block: func(t *testing.T) blocks.ROBlock {
beaconBlockElectra := util.NewBeaconBlockElectra()
// Block is from slot 0, "current slot" is window size +1 (so outside the window)
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
return newSignedRoBlock(t, beaconBlockElectra)
},
slot: windowSlots + 1,
},
{
name: "Commitments within data availability window",
block: func(t *testing.T) blocks.ROBlock {
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
signedBeaconBlockFulu.Block.Slot = 100
return newSignedRoBlock(t, signedBeaconBlockFulu)
},
commitments: commitments,
slot: 100,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SubscribeToAllSubnets = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
b := tc.block(t)
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, &peerdas.CustodyInfo{})
commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
require.NoError(t, err)
for _, commitments := range commitmentsArray {
require.DeepEqual(t, tc.commitments, commitments)
}
})
}
}


@@ -116,11 +116,9 @@ func TestLazilyPersistent_Missing(t *testing.T) {
ctx := context.Background()
store := filesystem.NewEphemeralBlobStorage(t)
- blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
+ blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
- scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
- mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
+ mbv := &mockBlobBatchVerifier{t: t, scs: scs}
as := NewLazilyPersistentStore(store, mbv)
// Only one commitment persisted, should return error with other indices
@@ -143,14 +141,12 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
ctx := context.Background()
store := filesystem.NewEphemeralBlobStorage(t)
- blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
+ blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
- blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
+ scs[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
as := NewLazilyPersistentStore(store, mbv)
- scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(1, scs[0]))
err := as.IsDataAvailable(ctx, 1, blk)
@@ -159,10 +155,7 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
}
func TestLazyPersistOnceCommitted(t *testing.T) {
- _, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
- scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
+ _, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
// stashes as expected
require.NoError(t, as.Persist(1, scs...))
@@ -170,13 +163,10 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)
// ignores index out of bound
- blobSidecars[0].Index = 6
- require.ErrorIs(t, as.Persist(1, blocks.NewSidecarFromBlobSidecar(blobSidecars[0])), errIndexOutOfBounds)
- _, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
- more := blocks.NewSidecarsFromBlobSidecars(moreBlobSidecars)
+ scs[0].Index = 6
+ require.ErrorIs(t, as.Persist(1, scs[0]), errIndexOutOfBounds)
+ _, more := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
// ignores sidecars before the retention period
slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)


@@ -10,7 +10,12 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
- var errIndexOutOfBounds = errors.New("sidecar.index > MAX_BLOBS_PER_BLOCK")
+ var (
+ ErrDuplicateSidecar = errors.New("duplicate sidecar stashed in AvailabilityStore")
+ errIndexOutOfBounds = errors.New("sidecar.index > MAX_BLOBS_PER_BLOCK")
+ errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment")
+ errMissingSidecar = errors.New("no sidecar in cache for block commitment")
+ )
// cacheKey includes the slot so that we can easily iterate through the cache and compare
// slots for eviction purposes. Whether the input is the block or the sidecar, we always have


@@ -1,135 +0,0 @@
package das
import (
"reflect"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
var (
ErrDuplicateSidecar = errors.New("duplicate sidecar stashed in AvailabilityStore")
errColumnIndexTooHigh = errors.New("column index too high")
errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment")
errMissingSidecar = errors.New("no sidecar in cache for block commitment")
)
// dataColumnCacheKey includes the slot so that we can easily iterate through the cache and compare
// slots for eviction purposes. Whether the input is the block or the sidecar, we always have
// the root+slot when interacting with the cache, so it isn't an inconvenience to use both.
type dataColumnCacheKey struct {
slot primitives.Slot
root [32]byte
}
type dataColumnCache struct {
entries map[dataColumnCacheKey]*dataColumnCacheEntry
}
func newDataColumnCache() *dataColumnCache {
return &dataColumnCache{entries: make(map[dataColumnCacheKey]*dataColumnCacheEntry)}
}
// ensure returns the entry for the given key, creating it if it isn't already present.
func (c *dataColumnCache) ensure(key dataColumnCacheKey) *dataColumnCacheEntry {
entry, ok := c.entries[key]
if !ok {
entry = &dataColumnCacheEntry{}
c.entries[key] = entry
}
return entry
}
// delete removes the cache entry from the cache.
func (c *dataColumnCache) delete(key dataColumnCacheKey) {
delete(c.entries, key)
}
// dataColumnCacheEntry holds a fixed-length cache of DataColumnSidecars.
type dataColumnCacheEntry struct {
scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
diskSummary filesystem.DataColumnStorageSummary
}
func (e *dataColumnCacheEntry) setDiskSummary(sum filesystem.DataColumnStorageSummary) {
e.diskSummary = sum
}
// stash adds an item to the in-memory cache of DataColumnSidecars.
// Only the first DataColumnSidecar of a given Index will be kept in the cache.
// stash will return an error if the given data column is already in the cache, or if the Index is out of bounds.
func (e *dataColumnCacheEntry) stash(sc *blocks.RODataColumn) error {
if sc.Index >= fieldparams.NumberOfColumns {
return errors.Wrapf(errColumnIndexTooHigh, "index=%d", sc.Index)
}
if e.scs[sc.Index] != nil {
return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitments)
}
e.scs[sc.Index] = sc
return nil
}
func (e *dataColumnCacheEntry) filter(root [32]byte, commitmentsArray *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
nonEmptyIndices := commitmentsArray.nonEmptyIndices()
if e.diskSummary.AllAvailable(nonEmptyIndices) {
return nil, nil
}
commitmentsCount := commitmentsArray.count()
sidecars := make([]blocks.RODataColumn, 0, commitmentsCount)
for i := range nonEmptyIndices {
if e.diskSummary.HasIndex(i) {
continue
}
if e.scs[i] == nil {
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
}
if !reflect.DeepEqual(commitmentsArray[i], e.scs[i].KzgCommitments) {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitments, commitmentsArray[i])
}
sidecars = append(sidecars, *e.scs[i])
}
return sidecars, nil
}
// safeCommitmentsArray is a fixed size array of commitments.
// This is helpful for avoiding gratuitous bounds checks.
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte
// count returns the number of commitments in the array.
func (s *safeCommitmentsArray) count() int {
count := 0
for i := range s {
if s[i] != nil {
count++
}
}
return count
}
// nonEmptyIndices returns a map of indices that are non-nil in the array.
func (s *safeCommitmentsArray) nonEmptyIndices() map[uint64]bool {
columns := make(map[uint64]bool)
for i := range s {
if s[i] != nil {
columns[uint64(i)] = true
}
}
return columns
}
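For instance, with illustrative values (mirroring the tests below):
arr := safeCommitmentsArray{nil, [][]byte{{0x01}}, nil, [][]byte{{0x03}}}
_ = arr.count()           // 2
_ = arr.nonEmptyIndices() // map[uint64]bool{1: true, 3: true}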


@@ -1,124 +0,0 @@
package das
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestEnsureDeleteSetDiskSummary(t *testing.T) {
c := newDataColumnCache()
key := dataColumnCacheKey{}
entry := c.ensure(key)
require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true})
entry.setDiskSummary(diskSummary)
entry = c.ensure(key)
require.DeepEqual(t, dataColumnCacheEntry{diskSummary: diskSummary}, *entry)
c.delete(key)
entry = c.ensure(key)
require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
}
func TestStash(t *testing.T) {
t.Run("Index too high", func(t *testing.T) {
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 10_000}}}
roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
var entry dataColumnCacheEntry
err := entry.stash(&roDataColumns[0])
require.NotNil(t, err)
})
t.Run("Nominal and already existing", func(t *testing.T) {
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 1}}}
roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
var entry dataColumnCacheEntry
err := entry.stash(&roDataColumns[0])
require.NoError(t, err)
require.DeepEqual(t, roDataColumns[0], entry.scs[1])
err = entry.stash(&roDataColumns[0])
require.NotNil(t, err)
})
}
func TestFilterDataColumns(t *testing.T) {
t.Run("All available", func(t *testing.T) {
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true, false, true})
dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}
actual, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
require.NoError(t, err)
require.IsNil(t, actual)
})
t.Run("Some scs missing", func(t *testing.T) {
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{})
dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}
_, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
require.NotNil(t, err)
})
t.Run("Commitments not equal", func(t *testing.T) {
root := [fieldparams.RootLength]byte{}
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: {{ColumnIndex: 1}}}
roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
scs[1] = &roDataColumns[0]
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs}
_, err := dataColumnCacheEntry.filter(root, &commitmentsArray)
require.NotNil(t, err)
})
t.Run("Nominal", func(t *testing.T) {
root := [fieldparams.RootLength]byte{}
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true})
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: {{ColumnIndex: 3, KzgCommitments: [][]byte{[]byte{3}}}}}
expected, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
scs[3] = &expected[0]
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs, diskSummary: diskSummary}
actual, err := dataColumnCacheEntry.filter(root, &commitmentsArray)
require.NoError(t, err)
require.DeepEqual(t, expected, actual)
})
}
func TestCount(t *testing.T) {
s := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
require.Equal(t, 2, s.count())
}
func TestNonEmptyIndices(t *testing.T) {
s := safeCommitmentsArray{nil, [][]byte{[]byte{10}}, nil, [][]byte{[]byte{20}}}
actual := s.nonEmptyIndices()
require.DeepEqual(t, map[uint64]bool{1: true, 3: true}, actual)
}

View File

@@ -15,5 +15,5 @@ import (
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
Persist(current primitives.Slot, sc ...blocks.ROBlob) error
}
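// Hypothetical caller sketch for the interface above: persist the verified
// blob sidecars for a block, then confirm availability. The store and inputs
// are assumptions.
func exampleEnsureAvailable(ctx context.Context, store AvailabilityStore, current primitives.Slot, b blocks.ROBlock, scs []blocks.ROBlob) error {
	// Persist must durably store the sidecars before returning nil.
	if err := store.Persist(current, scs...); err != nil {
		return err
	}
	// IsDataAvailable errors if the data for b cannot be confirmed available.
	return store.IsDataAvailable(ctx, current, b)
}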

View File

@@ -3,7 +3,6 @@ package das
import (
"context"
errors "github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -25,13 +24,9 @@ func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current pri
}
// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROSidecar) error {
blobSidecars, err := blocks.BlobSidecarsFromSidecars(sc)
if err != nil {
return errors.Wrap(err, "blob sidecars from sidecars")
}
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
if m.PersistBlobsCallback != nil {
return m.PersistBlobsCallback(current, blobSidecars...)
return m.PersistBlobsCallback(current, sc...)
}
return nil
}

View File

@@ -5,9 +5,6 @@ go_library(
srcs = [
"blob.go",
"cache.go",
"data_column.go",
"data_column_cache.go",
"doc.go",
"iteration.go",
"layout.go",
"layout_by_epoch.go",
@@ -20,8 +17,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
visibility = ["//visibility:public"],
deps = [
"//async:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
@@ -30,7 +25,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/file:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/logging:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
@@ -47,8 +41,6 @@ go_test(
srcs = [
"blob_test.go",
"cache_test.go",
"data_column_cache_test.go",
"data_column_test.go",
"iteration_test.go",
"layout_test.go",
"migration_test.go",
@@ -58,7 +50,6 @@ go_test(
deps = [
"//beacon-chain/db:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",

View File

@@ -1,999 +0,0 @@
package filesystem
import (
"context"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/async"
"github.com/prysmaticlabs/prysm/v5/async/event"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/io/file"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/spf13/afero"
)
const (
version = 0x01
versionOffset = 0 // bytes
versionSize = 1 // bytes
maxSszEncodedDataColumnSidecarSize = 536_870_912 // 2**32 / 8 bytes (512 MiB)
encodedSszEncodedDataColumnSidecarSizeOffset = versionOffset + versionSize
encodedSszEncodedDataColumnSidecarSizeSize = 4 // bytes (size of the encoded size of the SSZ encoded data column sidecar)
mandatoryNumberOfColumns = 128 // 2**7
indicesOffset = encodedSszEncodedDataColumnSidecarSizeOffset + encodedSszEncodedDataColumnSidecarSizeSize
indicesSize = mandatoryNumberOfColumns
nonZeroOffset = mandatoryNumberOfColumns
headerSize = versionSize + encodedSszEncodedDataColumnSidecarSizeSize + mandatoryNumberOfColumns
dataColumnsFileExtension = "sszs"
prunePeriod = 1 * time.Minute
)
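// Worked layout example, derived from the constants above: each file starts
// with a headerSize = 1 + 4 + 128 = 133 byte header, followed by fixed-size
// SSZ encoded sidecars.
//
//	offset 0:   version                  (1 byte)
//	offset 1:   SSZ encoded sidecar size (4 bytes, big endian)
//	offset 5:   indices table            (128 bytes, one per column)
//	offset 133: sidecar at position 0, then position 1, ...
//
// The sidecar stored at position p therefore begins at
// headerSize + p*sszEncodedDataColumnSidecarSize, as used in Get below.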
var (
errWrongNumberOfColumns = errors.New("wrong number of data columns")
errDataColumnIndexTooLarge = errors.New("data column index too large")
errWrongBytesWritten = errors.New("wrong number of bytes written")
errWrongVersion = errors.New("wrong version")
errWrongBytesHeaderRead = errors.New("wrong number of bytes header read")
errTooManyDataColumns = errors.New("too many data columns")
errWrongSszEncodedDataColumnSidecarSize = errors.New("wrong SSZ encoded data column sidecar size")
errDataColumnSidecarsFromDifferentSlots = errors.New("data column sidecars from different slots")
)
type (
// DataColumnStorage is the concrete implementation of the filesystem backend for saving and retrieving DataColumnSidecars.
DataColumnStorage struct {
base string
retentionEpochs primitives.Epoch
fs afero.Fs
cache *dataColumnStorageSummaryCache
DataColumnFeed *event.Feed
muChans map[[fieldparams.RootLength]byte]*muChan
mu sync.Mutex // protect muChans
pruneMu sync.RWMutex
}
// DataColumnStorageOption is a functional option for configuring a DataColumnStorage.
DataColumnStorageOption func(*DataColumnStorage) error
// DataColumnsIdent is the unique identifier for a set of data column sidecars belonging to the same block.
DataColumnsIdent struct {
Root [fieldparams.RootLength]byte
Epoch primitives.Epoch
Indices []uint64
}
storageIndices struct {
indices [mandatoryNumberOfColumns]byte
}
metadata struct {
indices *storageIndices
savedDataColumnSidecarCount int64
sszEncodedDataColumnSidecarSize uint32
fileSize int64
}
fileMetadata struct {
period uint64
epoch primitives.Epoch
blockRoot [fieldparams.RootLength]byte
}
muChan struct {
mu *sync.RWMutex
toStore chan []blocks.VerifiedRODataColumn
}
)
// WithDataColumnBasePath is a required option that sets the base path of data column storage.
func WithDataColumnBasePath(base string) DataColumnStorageOption {
return func(b *DataColumnStorage) error {
b.base = base
return nil
}
}
// WithDataColumnRetentionEpochs is an option that changes the number of epochs data columns will be persisted.
func WithDataColumnRetentionEpochs(e primitives.Epoch) DataColumnStorageOption {
return func(b *DataColumnStorage) error {
b.retentionEpochs = e
return nil
}
}
// WithDataColumnFs allows the afero.Fs implementation to be customized.
// Used by tests to substitute an in-memory filesystem.
func WithDataColumnFs(fs afero.Fs) DataColumnStorageOption {
return func(b *DataColumnStorage) error {
b.fs = fs
return nil
}
}
// NewDataColumnStorage creates a new instance of the DataColumnStorage object. Note that the implementation of DataColumnStorage may
// attempt to hold a file lock to guarantee exclusive control of the data column storage directory, so this should only be
// initialized once per beacon node.
func NewDataColumnStorage(ctx context.Context, opts ...DataColumnStorageOption) (*DataColumnStorage, error) {
storage := &DataColumnStorage{
DataColumnFeed: new(event.Feed),
muChans: make(map[[fieldparams.RootLength]byte]*muChan),
}
for _, o := range opts {
if err := o(storage); err != nil {
return nil, errors.Wrap(err, "failed to create data column storage")
}
}
// Allow tests to set up a different fs using WithDataColumnFs.
if storage.fs == nil {
if storage.base == "" {
return nil, errNoBasePath
}
storage.base = path.Clean(storage.base)
if err := file.MkdirAll(storage.base); err != nil {
return nil, errors.Wrapf(err, "failed to create data column storage at %s", storage.base)
}
storage.fs = afero.NewBasePathFs(afero.NewOsFs(), storage.base)
}
storage.cache = newDataColumnStorageSummaryCache()
async.RunEvery(ctx, prunePeriod, func() {
storage.pruneMu.Lock()
defer storage.pruneMu.Unlock()
storage.prune()
})
return storage, nil
}
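// Hypothetical construction sketch: tests typically substitute an in-memory
// filesystem, in which case no base path is required. The retention value
// here is an assumption.
func exampleNewStorage(ctx context.Context) (*DataColumnStorage, error) {
	return NewDataColumnStorage(
		ctx,
		WithDataColumnFs(afero.NewMemMapFs()),
		WithDataColumnRetentionEpochs(4_096),
	)
}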
// WarmCache warms the cache of the data column filesystem.
// It holds the prune (write) lock for the entire time it is running.
func (dcs *DataColumnStorage) WarmCache() {
start := time.Now()
log.Info("Data column filesystem cache warm-up started")
dcs.pruneMu.Lock()
defer dcs.pruneMu.Unlock()
highestStoredEpoch := primitives.Epoch(0)
// Walk the data column filesystem to warm up the cache.
if err := afero.Walk(dcs.fs, ".", func(path string, info os.FileInfo, fileErr error) (err error) {
if fileErr != nil {
return fileErr
}
// If not a leaf, skip.
if info.IsDir() {
return nil
}
// Extract metadata from the file path.
fileMetadata, err := extractFileMetadata(path)
if err != nil {
log.WithError(err).Error("Error encountered while extracting file metadata")
}
// Open the data column filesystem file.
file, err := dcs.fs.Open(path)
if err != nil {
log.WithError(err).Error("Error encountered while opening data column filesystem file")
return nil
}
// Close the file.
defer func() {
// Overwrite the existing error only if it is nil, since the close error is less important.
closeErr := file.Close()
if closeErr != nil && err == nil {
err = closeErr
}
}()
// Read the metadata of the file.
metadata, err := dcs.metadata(file)
if err != nil {
log.WithError(err).Error("Error encountered while reading metadata from data column filesystem file")
return nil
}
// Check the indices.
indices := metadata.indices.all()
if len(indices) == 0 {
return nil
}
// Build the ident.
dataColumnsIdent := DataColumnsIdent{Root: fileMetadata.blockRoot, Epoch: fileMetadata.epoch, Indices: indices}
// Update the highest stored epoch.
highestStoredEpoch = max(highestStoredEpoch, fileMetadata.epoch)
// Set the ident in the cache.
if err := dcs.cache.set(dataColumnsIdent); err != nil {
log.WithError(err).Error("Error encountered while ensuring data column filesystem cache")
}
return nil
}); err != nil {
log.WithError(err).Error("Error encountered while walking data column filesystem.")
}
// Prune the cache and the filesystem.
dcs.prune()
log.WithField("elapsed", time.Since(start)).Info("Data column filesystem cache warm-up complete")
}
// Summary returns the DataColumnStorageSummary.
func (dcs *DataColumnStorage) Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary {
return dcs.cache.Summary(root)
}
// Save saves data column sidecars into the database.
// Pruning happens asynchronously on a timer (see NewDataColumnStorage), not as part of this call.
func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
startTime := time.Now()
if len(dataColumnSidecars) == 0 {
return nil
}
// Check that the number of columns is the expected one.
// At the time of writing, the number of columns is not expected to change.
// If it does, a new version of the data column sidecar file will be needed.
if params.BeaconConfig().NumberOfColumns != mandatoryNumberOfColumns {
return errWrongNumberOfColumns
}
highestEpoch := primitives.Epoch(0)
dataColumnSidecarsbyRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
// Group data column sidecars by root.
for _, dataColumnSidecar := range dataColumnSidecars {
// Check if the data column index is too large.
if dataColumnSidecar.Index >= mandatoryNumberOfColumns {
return errDataColumnIndexTooLarge
}
root := dataColumnSidecar.BlockRoot()
dataColumnSidecarsbyRoot[root] = append(dataColumnSidecarsbyRoot[root], dataColumnSidecar)
}
for root, dataColumnSidecars := range dataColumnSidecarsbyRoot {
// Safety check all data column sidecars for this root are from the same slot.
firstSlot := dataColumnSidecars[0].SignedBlockHeader.Header.Slot
for _, dataColumnSidecar := range dataColumnSidecars[1:] {
if dataColumnSidecar.SignedBlockHeader.Header.Slot != firstSlot {
return errDataColumnSidecarsFromDifferentSlots
}
}
// Set the highest epoch.
epoch := slots.ToEpoch(dataColumnSidecars[0].Slot())
highestEpoch = max(highestEpoch, epoch)
// Save data columns in the filesystem.
if err := dcs.saveFilesystem(root, epoch, dataColumnSidecars); err != nil {
return errors.Wrap(err, "save filesystem")
}
// Get all indices.
indices := make([]uint64, 0, len(dataColumnSidecars))
for _, dataColumnSidecar := range dataColumnSidecars {
indices = append(indices, dataColumnSidecar.Index)
}
// Compute the data columns ident.
dataColumnsIdent := DataColumnsIdent{Root: root, Epoch: slots.ToEpoch(dataColumnSidecars[0].Slot()), Indices: indices}
// Set data columns in the cache.
if err := dcs.cache.set(dataColumnsIdent); err != nil {
return errors.Wrap(err, "cache set")
}
// Notify the data column feed.
dcs.DataColumnFeed.Send(dataColumnsIdent)
}
dataColumnSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
}
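// Hypothetical subscriber sketch: consumers can watch DataColumnFeed to learn
// which roots and indices Save just persisted. The channel size is an
// assumption.
func exampleWatchSaves(dcs *DataColumnStorage) {
	identCh := make(chan DataColumnsIdent, 1)
	sub := dcs.DataColumnFeed.Subscribe(identCh)
	defer sub.Unsubscribe()
	// Each Save sends one DataColumnsIdent per block root it stored.
	ident := <-identCh
	log.WithField("root", fmt.Sprintf("%#x", ident.Root)).Debug("Data columns persisted")
}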
// saveFilesystem saves data column sidecars into the database.
// This function expects all data column sidecars to belong to the same block.
func (dcs *DataColumnStorage) saveFilesystem(root [fieldparams.RootLength]byte, epoch primitives.Epoch, dataColumnSidecars []blocks.VerifiedRODataColumn) error {
// Compute the file path.
filePath := filePath(root, epoch)
dcs.pruneMu.RLock()
defer dcs.pruneMu.RUnlock()
fileMu, toStore := dcs.fileMutex(root)
toStore <- dataColumnSidecars
fileMu.Lock()
defer fileMu.Unlock()
// Check if the file exists.
exists, err := afero.Exists(dcs.fs, filePath)
if err != nil {
return errors.Wrap(err, "afero exists")
}
if exists {
if err := dcs.saveDataColumnSidecarsExistingFile(filePath, toStore); err != nil {
return errors.Wrap(err, "save data column existing file")
}
return nil
}
if err := dcs.saveDataColumnSidecarsNewFile(filePath, toStore); err != nil {
return errors.Wrap(err, "save data columns new file")
}
return nil
}
// Get retrieves data column sidecars from the database.
// If one of the requested data column sidecars is not found, it is just skipped.
// If indices is nil, then all stored data column sidecars are returned.
// Since DataColumnStorage only writes data columns that have undergone full verification, the return
// value is always a VerifiedRODataColumn.
func (dcs *DataColumnStorage) Get(root [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
dcs.pruneMu.RLock()
defer dcs.pruneMu.RUnlock()
fileMu, _ := dcs.fileMutex(root)
fileMu.RLock()
defer fileMu.RUnlock()
startTime := time.Now()
// Build all indices if none are provided.
if indices == nil {
indices = make([]uint64, mandatoryNumberOfColumns)
for i := range indices {
indices[i] = uint64(i)
}
}
summary, ok := dcs.cache.get(root)
if !ok {
// Nothing found in db. Exit early.
return nil, nil
}
// Compute the file path.
filePath := filePath(root, summary.epoch)
// Open the data column sidecars file.
// We do not separately check that the file exists, since the cache lookup above already succeeded.
file, err := dcs.fs.Open(filePath)
if err != nil {
return nil, errors.Wrap(err, "data column sidecars file path open")
}
// Read file metadata.
metadata, err := dcs.metadata(file)
if err != nil {
return nil, errors.Wrap(err, "metadata")
}
// Retrieve data column sidecars from the file.
verifiedRODataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(indices))
for _, index := range indices {
ok, position, err := metadata.indices.get(index)
if err != nil {
return nil, errors.Wrap(err, "get index")
}
// Skip if the data column is not saved.
if !ok {
continue
}
// Compute the offset of the data column sidecar.
offset := headerSize + position*int64(metadata.sszEncodedDataColumnSidecarSize)
// Seek to the beginning of the data column sidecar.
_, err = file.Seek(offset, io.SeekStart)
if err != nil {
return nil, errors.Wrap(err, "seek")
}
verifiedRODataColumn, err := verification.VerifiedRODataColumnFromDisk(file, root, metadata.sszEncodedDataColumnSidecarSize)
if err != nil {
return nil, errors.Wrap(err, "verified RO data column from disk")
}
// Append the verified RO data column to the data column sidecars.
verifiedRODataColumnSidecars = append(verifiedRODataColumnSidecars, verifiedRODataColumn)
}
dataColumnFetchLatency.Observe(float64(time.Since(startTime).Milliseconds()))
return verifiedRODataColumnSidecars, nil
}
// Remove deletes all data column sidecars for a given root.
func (dcs *DataColumnStorage) Remove(blockRoot [fieldparams.RootLength]byte) error {
dcs.pruneMu.RLock()
defer dcs.pruneMu.RUnlock()
fileMu, _ := dcs.fileMutex(blockRoot)
fileMu.Lock()
defer fileMu.Unlock()
summary, ok := dcs.cache.get(blockRoot)
if !ok {
// Nothing found in db. Exit early.
return nil
}
// Remove the data column sidecars from the cache.
dcs.cache.evict(blockRoot)
// Remove the data column sidecars file.
filePath := filePath(blockRoot, summary.epoch)
if err := dcs.fs.Remove(filePath); err != nil {
return errors.Wrap(err, "remove")
}
return nil
}
// Clear deletes all files on the filesystem.
func (dcs *DataColumnStorage) Clear() error {
dcs.pruneMu.Lock()
defer dcs.pruneMu.Unlock()
dirs, err := listDir(dcs.fs, ".")
if err != nil {
return errors.Wrap(err, "list dir")
}
dcs.cache.clear()
for _, dir := range dirs {
if err := dcs.fs.RemoveAll(dir); err != nil {
return errors.Wrap(err, "remove all")
}
}
return nil
}
// prune cleans the cache, the filesystem, and the per-root mutexes.
func (dcs *DataColumnStorage) prune() {
dcs.mu.Lock()
defer dcs.mu.Unlock()
highestStoredEpoch := dcs.cache.HighestEpoch()
// Check if we need to prune.
if highestStoredEpoch < dcs.retentionEpochs {
return
}
highestEpochToPrune := highestStoredEpoch - dcs.retentionEpochs
highestPeriodToPrune := period(highestEpochToPrune)
// Prune the cache.
prunedCount := dcs.cache.pruneUpTo(highestEpochToPrune)
if prunedCount == 0 {
return
}
dataColumnPrunedCounter.Add(float64(prunedCount))
// Prune the filesystem.
periodFileInfos, err := afero.ReadDir(dcs.fs, ".")
if err != nil {
log.WithError(err).Error("Error encountered while reading top directory")
return
}
for _, periodFileInfo := range periodFileInfos {
periodStr := periodFileInfo.Name()
period, err := strconv.ParseUint(periodStr, 10, 64)
if err != nil {
log.WithError(err).Errorf("Error encountered while parsing period %s", periodStr)
continue
}
if period < highestPeriodToPrune {
// Remove everything lower than the highest period to prune.
if err := dcs.fs.RemoveAll(periodStr); err != nil {
log.WithError(err).Error("Error encountered while removing period directory")
}
continue
}
if period > highestPeriodToPrune {
// Do not remove anything higher than highest period to prune.
continue
}
// if period == highestPeriodToPrune
epochFileInfos, err := afero.ReadDir(dcs.fs, periodStr)
if err != nil {
log.WithError(err).Error("Error encountered while reading epoch directory")
continue
}
for _, epochFileInfo := range epochFileInfos {
epochStr := epochFileInfo.Name()
epochDir := path.Join(periodStr, epochStr)
epoch, err := strconv.ParseUint(epochStr, 10, 64)
if err != nil {
log.WithError(err).Errorf("Error encountered while parsing epoch %s", epochStr)
continue
}
if primitives.Epoch(epoch) > highestEpochToPrune {
continue
}
if err := dcs.fs.RemoveAll(epochDir); err != nil {
log.WithError(err).Error("Error encountered while removing epoch directory")
continue
}
}
}
clear(dcs.muChans)
}
// saveDataColumnSidecarsExistingFile saves data column sidecars into an existing file.
// This function expects all data column sidecars to belong to the same block.
func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string, inputDataColumnSidecars chan []blocks.VerifiedRODataColumn) (err error) {
// Open the data column sidecars file.
file, err := dcs.fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600))
if err != nil {
return errors.Wrap(err, "data column sidecars file path open")
}
defer func() {
closeErr := file.Close()
// Overwrite the existing error only if it is nil, since the close error is less important.
if closeErr != nil && err == nil {
err = closeErr
}
}()
metadata, err := dcs.metadata(file)
if err != nil {
return errors.Wrap(err, "metadata")
}
// Create the SSZ encoded data column sidecars.
var sszEncodedDataColumnSidecars []byte
for {
dataColumnSidecars := pullChan(inputDataColumnSidecars)
if len(dataColumnSidecars) == 0 {
break
}
for _, dataColumnSidecar := range dataColumnSidecars {
// Extract the data columns index.
dataColumnIndex := dataColumnSidecar.Index
ok, _, err := metadata.indices.get(dataColumnIndex)
if err != nil {
return errors.Wrap(err, "get index")
}
// Skip if the data column is already saved.
if ok {
continue
}
// Check if the number of saved data columns is too large.
// This cannot happen in practice, since this function is only reached
// through Save, which bounds every column index.
if metadata.savedDataColumnSidecarCount >= mandatoryNumberOfColumns {
return errTooManyDataColumns
}
// SSZ encode the data column sidecar.
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "data column sidecar marshal SSZ")
}
// Compute the size of the SSZ encoded data column sidecar.
incomingSszEncodedDataColumnSidecarSize := uint32(len(sszEncodedDataColumnSidecar))
// Check if the incoming encoded data column sidecar size corresponds to the one read from the file.
if incomingSszEncodedDataColumnSidecarSize != metadata.sszEncodedDataColumnSidecarSize {
return errWrongSszEncodedDataColumnSidecarSize
}
// Alter indices to mark the data column as saved.
if err := metadata.indices.set(dataColumnIndex, uint8(metadata.savedDataColumnSidecarCount)); err != nil {
return errors.Wrap(err, "set index")
}
metadata.savedDataColumnSidecarCount++
// Append the SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
}
}
// Save indices to the file.
indices := metadata.indices.raw()
count, err := file.WriteAt(indices[:], int64(versionSize+encodedSszEncodedDataColumnSidecarSizeSize))
if err != nil {
return errors.Wrap(err, "write indices")
}
if count != mandatoryNumberOfColumns {
return errWrongBytesWritten
}
// Append the SSZ encoded data column sidecars to the end of the file.
count, err = file.WriteAt(sszEncodedDataColumnSidecars, metadata.fileSize)
if err != nil {
return errors.Wrap(err, "write SSZ encoded data column sidecars")
}
if count != len(sszEncodedDataColumnSidecars) {
return errWrongBytesWritten
}
return nil
}
// saveDataColumnSidecarsNewFile saves data column sidecars into a new file.
// This function expects all data column sidecars to belong to the same block.
func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inputDataColumnSidecars chan []blocks.VerifiedRODataColumn) (err error) {
// Initialize the indices.
var indices storageIndices
var (
sszEncodedDataColumnSidecarRefSize int
sszEncodedDataColumnSidecars []byte
)
// Initialize the count of saved SSZ encoded data column sidecars.
storedCount := uint8(0)
for {
dataColumnSidecars := pullChan(inputDataColumnSidecars)
if len(dataColumnSidecars) == 0 {
break
}
for _, dataColumnSidecar := range dataColumnSidecars {
// Extract the data column index.
dataColumnIndex := dataColumnSidecar.Index
// Skip if the data column is already stored.
ok, _, err := indices.get(dataColumnIndex)
if err != nil {
return errors.Wrap(err, "get index")
}
if ok {
continue
}
// Alter the indices to mark this data column sidecar as saved.
// storedCount stays below mandatoryNumberOfColumns, so it always fits in a uint8.
if err := indices.set(dataColumnIndex, storedCount); err != nil {
return errors.Wrap(err, "set index")
}
// Increment the count of saved SSZ encoded data column sidecars.
storedCount++
// SSZ encode the data column sidecar.
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "data column sidecar marshal SSZ")
}
// Check if the size of the SSZ encoded data column sidecar is correct.
if sszEncodedDataColumnSidecarRefSize != 0 && len(sszEncodedDataColumnSidecar) != sszEncodedDataColumnSidecarRefSize {
return errWrongSszEncodedDataColumnSidecarSize
}
// Set the SSZ encoded data column sidecar reference size.
sszEncodedDataColumnSidecarRefSize = len(sszEncodedDataColumnSidecar)
// Append the SSZ encoded data column sidecar to the accumulated buffer.
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
}
}
if storedCount == 0 {
// Nothing to save.
return nil
}
// Create the data column sidecars file.
dir := filepath.Dir(filePath)
if err := dcs.fs.MkdirAll(dir, directoryPermissions()); err != nil {
return errors.Wrapf(err, "mkdir all")
}
file, err := dcs.fs.Create(filePath)
if err != nil {
return errors.Wrap(err, "data column sidecars file path create")
}
defer func() {
closeErr := file.Close()
// Overwrite the existing error only if it is nil, since the close error is less important.
if closeErr != nil && err == nil {
err = closeErr
}
}()
// Encode the SSZ encoded data column sidecar size.
var encodedSszEncodedDataColumnSidecarSize [encodedSszEncodedDataColumnSidecarSizeSize]byte
binary.BigEndian.PutUint32(encodedSszEncodedDataColumnSidecarSize[:], uint32(sszEncodedDataColumnSidecarRefSize))
// Get the raw indices.
rawIndices := indices.raw()
// Concatenate the version, the data column sidecar size, the data column indices and the SSZ encoded data column sidecars.
countToWrite := headerSize + len(sszEncodedDataColumnSidecars)
bytes := make([]byte, 0, countToWrite)
bytes = append(bytes, byte(version))
bytes = append(bytes, encodedSszEncodedDataColumnSidecarSize[:]...)
bytes = append(bytes, rawIndices[:]...)
bytes = append(bytes, sszEncodedDataColumnSidecars...)
countWritten, err := file.Write(bytes)
if err != nil {
return errors.Wrap(err, "write")
}
if countWritten != countToWrite {
return errWrongBytesWritten
}
return nil
}
// metadata runs file sanity checks and retrieves metadata of the file.
// The file offset is not modified (ReadAt is used), so callers must seek before reading sidecars.
func (dcs *DataColumnStorage) metadata(file afero.File) (*metadata, error) {
var header [headerSize]byte
countRead, err := file.ReadAt(header[:], 0)
if err != nil {
return nil, errors.Wrap(err, "read at")
}
if countRead != headerSize {
return nil, errWrongBytesHeaderRead
}
// Read the encoded file version.
encodedFileVersion := header[versionOffset : versionOffset+versionSize]
// Convert the version to an int.
fileVersion := int(encodedFileVersion[0])
// Check if the version is the expected one.
if fileVersion != version {
return nil, errWrongVersion
}
// DataColumnSidecar is a variable sized ssz object, but all data columns for a block will be the same size.
encodedSszEncodedDataColumnSidecarSize := header[encodedSszEncodedDataColumnSidecarSizeOffset : encodedSszEncodedDataColumnSidecarSizeOffset+encodedSszEncodedDataColumnSidecarSizeSize]
// Convert the SSZ encoded data column sidecar size to an int.
sszEncodedDataColumnSidecarSize := binary.BigEndian.Uint32(encodedSszEncodedDataColumnSidecarSize)
// Read the data column indices.
indices, err := newStorageIndices(header[indicesOffset : indicesOffset+mandatoryNumberOfColumns])
if err != nil {
return nil, errors.Wrap(err, "new storage indices")
}
// Compute the saved columns count.
savedDataColumnSidecarCount := int64(indices.len())
// Compute the size of the file.
// It is safe to cast the SSZ encoded data column sidecar size to int64 since it is less than 2**63.
fileSize := int64(headerSize) + savedDataColumnSidecarCount*int64(sszEncodedDataColumnSidecarSize) // lint:ignore uintcast
metadata := &metadata{
indices: indices,
savedDataColumnSidecarCount: savedDataColumnSidecarCount,
sszEncodedDataColumnSidecarSize: sszEncodedDataColumnSidecarSize,
fileSize: fileSize,
}
return metadata, nil
}
func (dcs *DataColumnStorage) fileMutex(root [fieldparams.RootLength]byte) (*sync.RWMutex, chan []blocks.VerifiedRODataColumn) {
dcs.mu.Lock()
defer dcs.mu.Unlock()
mc, ok := dcs.muChans[root]
if !ok {
mc = &muChan{
mu: new(sync.RWMutex),
toStore: make(chan []blocks.VerifiedRODataColumn, 1),
}
dcs.muChans[root] = mc
return mc.mu, mc.toStore
}
return mc.mu, mc.toStore
}
func newStorageIndices(indices []byte) (*storageIndices, error) {
if len(indices) != mandatoryNumberOfColumns {
return nil, errWrongNumberOfColumns
}
var storageIndices storageIndices
copy(storageIndices.indices[:], indices)
return &storageIndices, nil
}
// get returns a boolean indicating if the data column sidecar is saved,
// and the position of the data column sidecar in the file.
func (si *storageIndices) get(dataColumnIndex uint64) (bool, int64, error) {
if dataColumnIndex >= mandatoryNumberOfColumns {
return false, 0, errDataColumnIndexTooLarge
}
if si.indices[dataColumnIndex] < nonZeroOffset {
return false, 0, nil
}
return true, int64(si.indices[dataColumnIndex] - nonZeroOffset), nil
}
// len returns the number of saved data column sidecars.
func (si *storageIndices) len() int {
count := 0
for _, i := range si.indices {
if i >= nonZeroOffset {
count++
}
}
return count
}
// all returns the indices of all saved data column sidecars.
func (si *storageIndices) all() []uint64 {
indices := make([]uint64, 0, len(si.indices))
for index, i := range si.indices {
if i >= nonZeroOffset {
indices = append(indices, uint64(index))
}
}
return indices
}
// raw returns the raw data column sidecar indices.
// The returned array is a copy and can be safely modified by the caller.
func (si *storageIndices) raw() [mandatoryNumberOfColumns]byte {
var result [mandatoryNumberOfColumns]byte
copy(result[:], si.indices[:])
return result
}
// set sets the data column sidecar as saved.
func (si *storageIndices) set(dataColumnIndex uint64, position uint8) error {
if dataColumnIndex >= mandatoryNumberOfColumns || position >= mandatoryNumberOfColumns {
return errDataColumnIndexTooLarge
}
si.indices[dataColumnIndex] = nonZeroOffset + position
return nil
}
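// Worked example of the encoding above: a zero byte means "column not
// stored", and a column stored at file position p is recorded as
// nonZeroOffset+p, so valid positions are distinguishable from the array's
// zero value.
//
//	var si storageIndices
//	_ = si.set(12, 0)        // column 12 stored first  -> indices[12] = 128
//	_ = si.set(11, 1)        // column 11 stored second -> indices[11] = 129
//	ok, pos, _ := si.get(12) // ok == true, pos == 0
//	ok, pos, _ = si.get(7)   // ok == false: byte 0 is below nonZeroOffset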
// pullChan pulls data column sidecars from the input channel until it is empty.
func pullChan(inputRoDataColumns chan []blocks.VerifiedRODataColumn) []blocks.VerifiedRODataColumn {
numberOfColumns := params.BeaconConfig().NumberOfColumns
dataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, numberOfColumns)
for {
select {
case dataColumnSidecar := <-inputRoDataColumns:
dataColumnSidecars = append(dataColumnSidecars, dataColumnSidecar...)
default:
return dataColumnSidecars
}
}
}
// filePath builds the file path in database for a given root and epoch.
func filePath(root [fieldparams.RootLength]byte, epoch primitives.Epoch) string {
return path.Join(
fmt.Sprintf("%d", period(epoch)),
fmt.Sprintf("%d", epoch),
fmt.Sprintf("%#x.%s", root, dataColumnsFileExtension),
)
}
// extractFileMetadata extracts the metadata from a file path.
// If the path does not match the expected period/epoch/root layout, an error is returned.
func extractFileMetadata(path string) (*fileMetadata, error) {
// Is this Windows friendly?
parts := strings.Split(path, "/")
if len(parts) != 3 {
return nil, errors.Errorf("unexpected file %s", path)
}
period, err := strconv.ParseUint(parts[0], 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse period from %s", path)
}
epoch, err := strconv.ParseUint(parts[1], 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse epoch from %s", path)
}
partsRoot := strings.Split(parts[2], ".")
if len(partsRoot) != 2 {
return nil, errors.Errorf("failed to parse root from %s", path)
}
blockRootString := partsRoot[0]
if len(blockRootString) != 2+2*fieldparams.RootLength || blockRootString[:2] != "0x" {
return nil, errors.Errorf("unexpected file name %s", path)
}
if partsRoot[1] != dataColumnsFileExtension {
return nil, errors.Errorf("unexpected extension %s", path)
}
blockRootSlice, err := hex.DecodeString(blockRootString[2:])
if err != nil {
return nil, errors.Wrapf(err, "decode string from %s", path)
}
var blockRoot [fieldparams.RootLength]byte
copy(blockRoot[:], blockRootSlice)
result := &fileMetadata{period: period, epoch: primitives.Epoch(epoch), blockRoot: blockRoot}
return result, nil
}
// period computes the period of a given epoch.
func period(epoch primitives.Epoch) uint64 {
return uint64(epoch / params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
}
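// Worked example, assuming MinEpochsForBlobsSidecarsRequest == 4096 (the
// mainnet value): epoch 4000 maps to period 0 and epoch 4098 to period 1, so
// a block root 0x01... at epoch 4098 is stored at
// "1/4098/0x0100000000000000000000000000000000000000000000000000000000000000.sszs".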

View File

@@ -1,219 +0,0 @@
package filesystem
import (
"sync"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
var errDataColumnIndexOutOfBounds = errors.New("data column index too high")
// DataColumnStorageSummary represents cached information about the DataColumnSidecars on disk for each root the cache knows about.
type DataColumnStorageSummary struct {
epoch primitives.Epoch
mask [fieldparams.NumberOfColumns]bool
}
// NewDataColumnStorageSummary creates a new DataColumnStorageSummary for a given epoch and mask.
func NewDataColumnStorageSummary(epoch primitives.Epoch, mask [fieldparams.NumberOfColumns]bool) DataColumnStorageSummary {
return DataColumnStorageSummary{
epoch: epoch,
mask: mask,
}
}
// HasIndex returns true if the DataColumnSidecar at the given index is available in the filesystem.
func (s DataColumnStorageSummary) HasIndex(index uint64) bool {
if index >= uint64(fieldparams.NumberOfColumns) {
return false
}
return s.mask[index]
}
// Count returns the number of available data columns.
func (s DataColumnStorageSummary) Count() uint64 {
count := uint64(0)
for _, available := range s.mask {
if available {
count++
}
}
return count
}
// AllAvailable returns true if data columns for all the given indices are available.
func (s DataColumnStorageSummary) AllAvailable(indices map[uint64]bool) bool {
if len(indices) > len(s.mask) {
return false
}
for index := range indices {
// Guard against out-of-range indices before the mask lookup.
if index >= uint64(len(s.mask)) || !s.mask[index] {
return false
}
}
return true
}
// DataColumnStorageSummarizer can be used to receive a summary of metadata about data columns on disk for a given root.
// The DataColumnStorageSummary can be used to check which indices (if any) are available for a given block by root.
type DataColumnStorageSummarizer interface {
Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
}
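// Hypothetical consumer sketch: any DataColumnStorageSummarizer can answer
// "which of these columns are still missing?" without touching disk. The
// inputs are assumptions.
func exampleMissingIndices(sum DataColumnStorageSummarizer, root [fieldparams.RootLength]byte, wanted []uint64) []uint64 {
	summary := sum.Summary(root)
	missing := make([]uint64, 0, len(wanted))
	for _, index := range wanted {
		if !summary.HasIndex(index) {
			missing = append(missing, index)
		}
	}
	return missing
}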
type dataColumnStorageSummaryCache struct {
mu sync.RWMutex
dataColumnCount float64
lowestCachedEpoch primitives.Epoch
highestCachedEpoch primitives.Epoch
cache map[[fieldparams.RootLength]byte]DataColumnStorageSummary
}
var _ DataColumnStorageSummarizer = &dataColumnStorageSummaryCache{}
func newDataColumnStorageSummaryCache() *dataColumnStorageSummaryCache {
return &dataColumnStorageSummaryCache{
cache: make(map[[fieldparams.RootLength]byte]DataColumnStorageSummary),
lowestCachedEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
// Summary returns the DataColumnStorageSummary for `root`.
// The DataColumnStorageSummary can be used to check for the presence of DataColumnSidecars based on Index.
func (sc *dataColumnStorageSummaryCache) Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary {
sc.mu.RLock()
defer sc.mu.RUnlock()
return sc.cache[root]
}
func (sc *dataColumnStorageSummaryCache) HighestEpoch() primitives.Epoch {
sc.mu.RLock()
defer sc.mu.RUnlock()
return sc.highestCachedEpoch
}
// set updates the cache.
func (sc *dataColumnStorageSummaryCache) set(dataColumnsIdent DataColumnsIdent) error {
numberOfColumns := params.BeaconConfig().NumberOfColumns
sc.mu.Lock()
defer sc.mu.Unlock()
summary := sc.cache[dataColumnsIdent.Root]
summary.epoch = dataColumnsIdent.Epoch
count := uint64(0)
for _, index := range dataColumnsIdent.Indices {
if index >= numberOfColumns {
return errDataColumnIndexOutOfBounds
}
if summary.mask[index] {
continue
}
count++
summary.mask[index] = true
sc.cache[dataColumnsIdent.Root] = summary
sc.lowestCachedEpoch = min(sc.lowestCachedEpoch, dataColumnsIdent.Epoch)
sc.highestCachedEpoch = max(sc.highestCachedEpoch, dataColumnsIdent.Epoch)
}
countFloat := float64(count)
sc.dataColumnCount += countFloat
dataColumnDiskCount.Set(sc.dataColumnCount)
dataColumnWrittenCounter.Add(countFloat)
return nil
}
// get returns the DataColumnStorageSummary for the given block root.
// If the root is not in the cache, the second return value will be false.
func (sc *dataColumnStorageSummaryCache) get(blockRoot [fieldparams.RootLength]byte) (DataColumnStorageSummary, bool) {
sc.mu.RLock()
defer sc.mu.RUnlock()
v, ok := sc.cache[blockRoot]
return v, ok
}
// evict removes the DataColumnStorageSummary for the given block root from the cache.
func (s *dataColumnStorageSummaryCache) evict(blockRoot [fieldparams.RootLength]byte) int {
deleted := 0
s.mu.Lock()
defer s.mu.Unlock()
summary, ok := s.cache[blockRoot]
if !ok {
return 0
}
for i := range summary.mask {
if summary.mask[i] {
deleted += 1
}
}
delete(s.cache, blockRoot)
if deleted > 0 {
s.dataColumnCount -= float64(deleted)
dataColumnDiskCount.Set(s.dataColumnCount)
}
// The lowest and highest cached epoch may no longer be valid here,
// but it is not worth the effort to recalculate them.
return deleted
}
// pruneUpTo removes all entries from the cache up to and including the given target epoch.
func (sc *dataColumnStorageSummaryCache) pruneUpTo(targetEpoch primitives.Epoch) uint64 {
sc.mu.Lock()
defer sc.mu.Unlock()
prunedCount := uint64(0)
newLowestCachedEpoch := params.BeaconConfig().FarFutureEpoch
newHighestCachedEpoch := primitives.Epoch(0)
for blockRoot, summary := range sc.cache {
epoch := summary.epoch
if epoch > targetEpoch {
newLowestCachedEpoch = min(newLowestCachedEpoch, epoch)
newHighestCachedEpoch = max(newHighestCachedEpoch, epoch)
}
if epoch <= targetEpoch {
for i := range summary.mask {
if summary.mask[i] {
prunedCount += 1
}
}
delete(sc.cache, blockRoot)
}
}
if prunedCount > 0 {
sc.lowestCachedEpoch = newLowestCachedEpoch
sc.highestCachedEpoch = newHighestCachedEpoch
sc.dataColumnCount -= float64(prunedCount)
dataColumnDiskCount.Set(sc.dataColumnCount)
}
return prunedCount
}
// clear removes all entries from the cache.
func (sc *dataColumnStorageSummaryCache) clear() uint64 {
return sc.pruneUpTo(params.BeaconConfig().FarFutureEpoch)
}

View File

@@ -1,235 +0,0 @@
package filesystem
import (
"testing"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestHasIndex(t *testing.T) {
summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true})
hasIndex := summary.HasIndex(1_000_000)
require.Equal(t, false, hasIndex)
hasIndex = summary.HasIndex(0)
require.Equal(t, false, hasIndex)
hasIndex = summary.HasIndex(1)
require.Equal(t, true, hasIndex)
}
func TestCount(t *testing.T) {
summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true, false, true})
count := summary.Count()
require.Equal(t, uint64(2), count)
}
func TestAllAvailableDataColumns(t *testing.T) {
const count = uint64(1_000)
summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true, false, true})
indices := make(map[uint64]bool, count)
for i := range count {
indices[i] = true
}
allAvailable := summary.AllAvailable(indices)
require.Equal(t, false, allAvailable)
indices = map[uint64]bool{1: true, 2: true}
allAvailable = summary.AllAvailable(indices)
require.Equal(t, false, allAvailable)
indices = map[uint64]bool{1: true, 3: true}
allAvailable = summary.AllAvailable(indices)
require.Equal(t, true, allAvailable)
}
func TestSummary(t *testing.T) {
root := [fieldparams.RootLength]byte{}
summaryCache := newDataColumnStorageSummaryCache()
expected := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{})
actual := summaryCache.Summary(root)
require.DeepEqual(t, expected, actual)
summaryCache = newDataColumnStorageSummaryCache()
expected = NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{true, false, true, false})
summaryCache.cache[root] = expected
actual = summaryCache.Summary(root)
require.DeepEqual(t, expected, actual)
}
func TestHighestEpoch(t *testing.T) {
root1 := [fieldparams.RootLength]byte{1}
root2 := [fieldparams.RootLength]byte{2}
root3 := [fieldparams.RootLength]byte{3}
summaryCache := newDataColumnStorageSummaryCache()
actual := summaryCache.HighestEpoch()
require.Equal(t, primitives.Epoch(0), actual)
err := summaryCache.set(DataColumnsIdent{Root: root1, Epoch: 42, Indices: []uint64{1, 3}})
require.NoError(t, err)
require.Equal(t, primitives.Epoch(42), summaryCache.HighestEpoch())
err = summaryCache.set(DataColumnsIdent{Root: root2, Epoch: 43, Indices: []uint64{1, 3}})
require.NoError(t, err)
require.Equal(t, primitives.Epoch(43), summaryCache.HighestEpoch())
err = summaryCache.set(DataColumnsIdent{Root: root3, Epoch: 40, Indices: []uint64{1, 3}})
require.NoError(t, err)
require.Equal(t, primitives.Epoch(43), summaryCache.HighestEpoch())
}
func TestSet(t *testing.T) {
t.Run("Index out of bounds", func(t *testing.T) {
summaryCache := newDataColumnStorageSummaryCache()
err := summaryCache.set(DataColumnsIdent{Indices: []uint64{1_000_000}})
require.ErrorIs(t, err, errDataColumnIndexOutOfBounds)
require.Equal(t, params.BeaconConfig().FarFutureEpoch, summaryCache.lowestCachedEpoch)
require.Equal(t, 0, len(summaryCache.cache))
})
t.Run("Nominal", func(t *testing.T) {
root1 := [fieldparams.RootLength]byte{1}
root2 := [fieldparams.RootLength]byte{2}
summaryCache := newDataColumnStorageSummaryCache()
err := summaryCache.set(DataColumnsIdent{Root: root1, Epoch: 42, Indices: []uint64{1, 3}})
require.NoError(t, err)
require.Equal(t, primitives.Epoch(42), summaryCache.lowestCachedEpoch)
require.Equal(t, 1, len(summaryCache.cache))
expected := DataColumnStorageSummary{epoch: 42, mask: [fieldparams.NumberOfColumns]bool{false, true, false, true}}
actual := summaryCache.cache[root1]
require.DeepEqual(t, expected, actual)
err = summaryCache.set(DataColumnsIdent{Root: root1, Epoch: 42, Indices: []uint64{0, 1}})
require.NoError(t, err)
require.Equal(t, primitives.Epoch(42), summaryCache.lowestCachedEpoch)
require.Equal(t, 1, len(summaryCache.cache))
expected = DataColumnStorageSummary{epoch: 42, mask: [fieldparams.NumberOfColumns]bool{true, true, false, true}}
actual = summaryCache.cache[root1]
require.DeepEqual(t, expected, actual)
err = summaryCache.set(DataColumnsIdent{Root: root2, Epoch: 43, Indices: []uint64{1}})
require.NoError(t, err)
require.Equal(t, primitives.Epoch(42), summaryCache.lowestCachedEpoch) // Epoch 42 is still the lowest
require.Equal(t, 2, len(summaryCache.cache))
expected = DataColumnStorageSummary{epoch: 43, mask: [fieldparams.NumberOfColumns]bool{false, true}}
actual = summaryCache.cache[root2]
require.DeepEqual(t, expected, actual)
})
}
func TestGet(t *testing.T) {
t.Run("Not in cache", func(t *testing.T) {
summaryCache := newDataColumnStorageSummaryCache()
root := [fieldparams.RootLength]byte{}
_, ok := summaryCache.get(root)
require.Equal(t, false, ok)
})
t.Run("In cache", func(t *testing.T) {
root := [fieldparams.RootLength]byte{}
summaryCache := newDataColumnStorageSummaryCache()
summaryCache.cache[root] = NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true, false, true, false})
actual, ok := summaryCache.get(root)
require.Equal(t, true, ok)
expected := NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true, false, true, false})
require.DeepEqual(t, expected, actual)
})
}
func TestEvict(t *testing.T) {
t.Run("No eviction", func(t *testing.T) {
root := [fieldparams.RootLength]byte{}
summaryCache := newDataColumnStorageSummaryCache()
evicted := summaryCache.evict(root)
require.Equal(t, 0, evicted)
})
t.Run("Eviction", func(t *testing.T) {
root1 := [fieldparams.RootLength]byte{1}
root2 := [fieldparams.RootLength]byte{2}
summaryCache := newDataColumnStorageSummaryCache()
summaryCache.cache[root1] = NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true, false, true, false})
summaryCache.cache[root2] = NewDataColumnStorageSummary(43, [fieldparams.NumberOfColumns]bool{false, true, false, true})
evicted := summaryCache.evict(root1)
require.Equal(t, 2, evicted)
require.Equal(t, 1, len(summaryCache.cache))
_, ok := summaryCache.cache[root1]
require.Equal(t, false, ok)
_, ok = summaryCache.cache[root2]
require.Equal(t, true, ok)
})
}
func TestPruneUpTo(t *testing.T) {
t.Run("No pruning", func(t *testing.T) {
summaryCache := newDataColumnStorageSummaryCache()
err := summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{1}, Epoch: 42, Indices: []uint64{1}})
require.NoError(t, err)
err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{2}, Epoch: 43, Indices: []uint64{2, 4}})
require.NoError(t, err)
count := summaryCache.pruneUpTo(41)
require.Equal(t, uint64(0), count)
require.Equal(t, 2, len(summaryCache.cache))
require.Equal(t, primitives.Epoch(42), summaryCache.lowestCachedEpoch)
})
t.Run("Pruning", func(t *testing.T) {
summaryCache := newDataColumnStorageSummaryCache()
err := summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{1}, Epoch: 42, Indices: []uint64{1}})
require.NoError(t, err)
err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{2}, Epoch: 44, Indices: []uint64{2, 4}})
require.NoError(t, err)
err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{3}, Epoch: 45, Indices: []uint64{2, 4}})
require.NoError(t, err)
count := summaryCache.pruneUpTo(42)
require.Equal(t, uint64(1), count)
require.Equal(t, 2, len(summaryCache.cache))
require.Equal(t, primitives.Epoch(44), summaryCache.lowestCachedEpoch)
count = summaryCache.pruneUpTo(45)
require.Equal(t, uint64(4), count)
require.Equal(t, 0, len(summaryCache.cache))
require.Equal(t, params.BeaconConfig().FarFutureEpoch, summaryCache.lowestCachedEpoch)
require.Equal(t, primitives.Epoch(0), summaryCache.highestCachedEpoch)
})
t.Run("Clear", func(t *testing.T) {
summaryCache := newDataColumnStorageSummaryCache()
err := summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{1}, Epoch: 42, Indices: []uint64{1}})
require.NoError(t, err)
err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{2}, Epoch: 44, Indices: []uint64{2, 4}})
require.NoError(t, err)
err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{3}, Epoch: 45, Indices: []uint64{2, 4}})
require.NoError(t, err)
count := summaryCache.clear()
require.Equal(t, uint64(5), count)
require.Equal(t, 0, len(summaryCache.cache))
require.Equal(t, params.BeaconConfig().FarFutureEpoch, summaryCache.lowestCachedEpoch)
require.Equal(t, primitives.Epoch(0), summaryCache.highestCachedEpoch)
})
}

View File

@@ -1,734 +0,0 @@
package filesystem
import (
"context"
"encoding/binary"
"os"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/spf13/afero"
)
func TestNewDataColumnStorage(t *testing.T) {
ctx := context.Background()
t.Run("No base path", func(t *testing.T) {
_, err := NewDataColumnStorage(ctx)
require.ErrorIs(t, err, errNoBasePath)
})
t.Run("Nominal", func(t *testing.T) {
dir := t.TempDir()
storage, err := NewDataColumnStorage(ctx, WithDataColumnBasePath(dir))
require.NoError(t, err)
require.Equal(t, dir, storage.base)
})
}
func TestWarmCache(t *testing.T) {
storage, err := NewDataColumnStorage(
context.Background(),
WithDataColumnBasePath(t.TempDir()),
WithDataColumnRetentionEpochs(10_000),
)
require.NoError(t, err)
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{0}: {
{Slot: 33, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 1
{Slot: 33, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 1
},
{1}: {
{Slot: 128_002, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
{Slot: 128_002, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
},
{2}: {
{Slot: 128_003, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
{Slot: 128_003, ColumnIndex: 3, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
},
{3}: {
{Slot: 128_034, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4001
{Slot: 128_034, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4001
},
{4}: {
{Slot: 131_138, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
},
{5}: {
{Slot: 131_138, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
},
{6}: {
{Slot: 131_168, ColumnIndex: 0, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
},
},
)
err = storage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)
storage.retentionEpochs = 4_096
storage.WarmCache()
require.Equal(t, primitives.Epoch(4_000), storage.cache.lowestCachedEpoch)
require.Equal(t, 6, len(storage.cache.cache))
summary, ok := storage.cache.get([fieldparams.RootLength]byte{1})
require.Equal(t, true, ok)
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_000, mask: [fieldparams.NumberOfColumns]bool{false, false, true, false, true}}, summary)
summary, ok = storage.cache.get([fieldparams.RootLength]byte{2})
require.Equal(t, true, ok)
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_000, mask: [fieldparams.NumberOfColumns]bool{false, true, false, true}}, summary)
summary, ok = storage.cache.get([fieldparams.RootLength]byte{3})
require.Equal(t, true, ok)
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_001, mask: [fieldparams.NumberOfColumns]bool{false, false, true, false, true}}, summary)
summary, ok = storage.cache.get([fieldparams.RootLength]byte{4})
require.Equal(t, true, ok)
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, false, true}}, summary)
summary, ok = storage.cache.get([fieldparams.RootLength]byte{5})
require.Equal(t, true, ok)
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, true}}, summary)
summary, ok = storage.cache.get([fieldparams.RootLength]byte{6})
require.Equal(t, true, ok)
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_099, mask: [fieldparams.NumberOfColumns]bool{true}}, summary)
}
func TestSaveDataColumnsSidecars(t *testing.T) {
t.Run("wrong numbers of columns", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
cfg.NumberOfColumns = 0
params.OverrideBeaconConfig(cfg)
params.SetupTestConfigCleanup(t)
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{}: {{ColumnIndex: 12}, {ColumnIndex: 1_000_000}, {ColumnIndex: 48}},
},
)
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.ErrorIs(t, err, errWrongNumberOfColumns)
})
t.Run("one of the column index is too large", func(t *testing.T) {
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{{}: {{ColumnIndex: 12}, {ColumnIndex: 1_000_000}, {ColumnIndex: 48}}},
)
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.ErrorIs(t, err, errDataColumnIndexTooLarge)
})
t.Run("different slots", func(t *testing.T) {
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{}: {
{Slot: 1, ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
{Slot: 2, ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
},
},
)
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.ErrorIs(t, err, errDataColumnSidecarsFromDifferentSlots)
})
t.Run("new file - no data columns to save", func(t *testing.T) {
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{{}: {}},
)
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)
})
t.Run("new file - different data column size", func(t *testing.T) {
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{}: {
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
{ColumnIndex: 11, DataColumn: []byte{1, 2, 3, 4}},
},
},
)
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.ErrorIs(t, err, errWrongSszEncodedDataColumnSidecarSize)
})
t.Run("existing file - wrong incoming SSZ encoded size", func(t *testing.T) {
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}}},
)
// Save data columns into a file.
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)
// Build a data column sidecar for the same block but with a different
// column index and an different SSZ encoded size.
_, verifiedRoDataColumnSidecars = verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 13, DataColumn: []byte{1, 2, 3, 4}}}},
)
// Try to rewrite the file.
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.ErrorIs(t, err, errWrongSszEncodedDataColumnSidecarSize)
})
t.Run("nominal", func(t *testing.T) {
_, inputVerifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{1}: {
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
{ColumnIndex: 11, DataColumn: []byte{3, 4, 5}},
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}, // OK if duplicate
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
},
{2}: {
{ColumnIndex: 12, DataColumn: []byte{3, 4, 5}},
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
},
},
)
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
require.NoError(t, err)
_, inputVerifiedRoDataColumnSidecars = verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{1}: {
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}, // OK if duplicate
{ColumnIndex: 15, DataColumn: []byte{2, 3, 4}},
{ColumnIndex: 1, DataColumn: []byte{2, 3, 4}},
},
{3}: {
{ColumnIndex: 6, DataColumn: []byte{3, 4, 5}},
{ColumnIndex: 2, DataColumn: []byte{6, 7, 8}},
},
},
)
err = dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
require.NoError(t, err)
type fixture struct {
fileName string
blockRoot [fieldparams.RootLength]byte
expectedIndices [mandatoryNumberOfColumns]byte
dataColumnParams []verification.DataColumnParams
}
fixtures := []fixture{
{
fileName: "0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
blockRoot: [fieldparams.RootLength]byte{1},
expectedIndices: [mandatoryNumberOfColumns]byte{
0, nonZeroOffset + 4, 0, 0, 0, 0, 0, 0,
0, 0, 0, nonZeroOffset + 1, nonZeroOffset, nonZeroOffset + 2, 0, nonZeroOffset + 3,
// The rest is filled with zeroes.
},
dataColumnParams: []verification.DataColumnParams{
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
{ColumnIndex: 11, DataColumn: []byte{3, 4, 5}},
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
{ColumnIndex: 15, DataColumn: []byte{2, 3, 4}},
{ColumnIndex: 1, DataColumn: []byte{2, 3, 4}},
},
},
{
fileName: "0/0/0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
blockRoot: [fieldparams.RootLength]byte{2},
expectedIndices: [mandatoryNumberOfColumns]byte{
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, nonZeroOffset, nonZeroOffset + 1, 0, 0,
// The rest is filled with zeroes.
},
dataColumnParams: []verification.DataColumnParams{
{ColumnIndex: 12, DataColumn: []byte{3, 4, 5}},
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
},
},
{
fileName: "0/0/0x0300000000000000000000000000000000000000000000000000000000000000.sszs",
blockRoot: [fieldparams.RootLength]byte{3},
expectedIndices: [mandatoryNumberOfColumns]byte{
0, 0, nonZeroOffset + 1, 0, 0, 0, nonZeroOffset, 0,
// The rest is filled with zeroes.
},
dataColumnParams: []verification.DataColumnParams{
{ColumnIndex: 6, DataColumn: []byte{3, 4, 5}},
{ColumnIndex: 2, DataColumn: []byte{6, 7, 8}},
},
},
}
for _, fixture := range fixtures {
// Build expected data column sidecars.
_, expectedDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{fixture.blockRoot: fixture.dataColumnParams},
)
// Build expected bytes.
firstSszEncodedDataColumnSidecar, err := expectedDataColumnSidecars[0].MarshalSSZ()
require.NoError(t, err)
dataColumnSidecarsCount := len(expectedDataColumnSidecars)
sszEncodedDataColumnSidecarSize := len(firstSszEncodedDataColumnSidecar)
sszEncodedDataColumnSidecars := make([]byte, 0, dataColumnSidecarsCount*sszEncodedDataColumnSidecarSize)
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, firstSszEncodedDataColumnSidecar...)
for _, dataColumnSidecar := range expectedDataColumnSidecars[1:] {
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
require.NoError(t, err)
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
}
var encodedSszEncodedDataColumnSidecarSize [encodedSszEncodedDataColumnSidecarSizeSize]byte
binary.BigEndian.PutUint32(encodedSszEncodedDataColumnSidecarSize[:], uint32(sszEncodedDataColumnSidecarSize))
expectedBytes := make([]byte, 0, headerSize+dataColumnSidecarsCount*sszEncodedDataColumnSidecarSize)
expectedBytes = append(expectedBytes, []byte{0x01}...)
expectedBytes = append(expectedBytes, encodedSszEncodedDataColumnSidecarSize[:]...)
expectedBytes = append(expectedBytes, fixture.expectedIndices[:]...)
expectedBytes = append(expectedBytes, sszEncodedDataColumnSidecars...)
// Check the actual content of the file.
actualBytes, err := afero.ReadFile(dataColumnStorage.fs, fixture.fileName)
require.NoError(t, err)
require.DeepSSZEqual(t, expectedBytes, actualBytes)
// Check the summary.
indices := map[uint64]bool{}
for _, dataColumnParam := range fixture.dataColumnParams {
indices[dataColumnParam.ColumnIndex] = true
}
summary := dataColumnStorage.Summary(fixture.blockRoot)
for index := range uint64(mandatoryNumberOfColumns) {
require.Equal(t, indices[index], summary.HasIndex(index))
}
err = dataColumnStorage.Remove(fixture.blockRoot)
require.NoError(t, err)
summary = dataColumnStorage.Summary(fixture.blockRoot)
for index := range uint64(mandatoryNumberOfColumns) {
require.Equal(t, false, summary.HasIndex(index))
}
_, err = afero.ReadFile(dataColumnStorage.fs, fixture.fileName)
require.ErrorIs(t, err, os.ErrNotExist)
}
})
}
func TestGetDataColumnSidecars(t *testing.T) {
t.Run("not found", func(t *testing.T) {
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{12, 13, 14})
require.NoError(t, err)
require.Equal(t, 0, len(verifiedRODataColumnSidecars))
})
t.Run("nominal", func(t *testing.T) {
_, expectedVerifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{1}: {
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
{ColumnIndex: 14, DataColumn: []byte{2, 3, 4}},
},
},
)
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(expectedVerifiedRoDataColumnSidecars)
require.NoError(t, err)
verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, nil)
require.NoError(t, err)
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)
verifiedRODataColumnSidecars, err = dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{12, 13, 14})
require.NoError(t, err)
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)
})
}
func TestRemove(t *testing.T) {
t.Run("not found", func(t *testing.T) {
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Remove([fieldparams.RootLength]byte{1})
require.NoError(t, err)
})
t.Run("nominal", func(t *testing.T) {
_, inputVerifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{1}: {
{Slot: 32, ColumnIndex: 10, DataColumn: []byte{1, 2, 3}},
{Slot: 32, ColumnIndex: 11, DataColumn: []byte{2, 3, 4}},
},
{2}: {
{Slot: 33, ColumnIndex: 10, DataColumn: []byte{1, 2, 3}},
{Slot: 33, ColumnIndex: 11, DataColumn: []byte{2, 3, 4}},
},
},
)
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
require.NoError(t, err)
err = dataColumnStorage.Remove([fieldparams.RootLength]byte{1})
require.NoError(t, err)
summary := dataColumnStorage.Summary([fieldparams.RootLength]byte{1})
require.Equal(t, primitives.Epoch(0), summary.epoch)
require.Equal(t, uint64(0), summary.Count())
summary = dataColumnStorage.Summary([fieldparams.RootLength]byte{2})
require.Equal(t, primitives.Epoch(1), summary.epoch)
require.Equal(t, uint64(2), summary.Count())
actual, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, nil)
require.NoError(t, err)
require.Equal(t, 0, len(actual))
actual, err = dataColumnStorage.Get([fieldparams.RootLength]byte{2}, nil)
require.NoError(t, err)
require.Equal(t, 2, len(actual))
})
}
func TestClear(t *testing.T) {
_, inputVerifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}},
{2}: {{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}}},
},
)
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
require.NoError(t, err)
filePaths := []string{
"0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
"0/0/0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
}
for _, filePath := range filePaths {
_, err = afero.ReadFile(dataColumnStorage.fs, filePath)
require.NoError(t, err)
}
err = dataColumnStorage.Clear()
require.NoError(t, err)
summary := dataColumnStorage.Summary([fieldparams.RootLength]byte{1})
for index := range uint64(mandatoryNumberOfColumns) {
require.Equal(t, false, summary.HasIndex(index))
}
for _, filePath := range filePaths {
_, err = afero.ReadFile(dataColumnStorage.fs, filePath)
require.ErrorIs(t, err, os.ErrNotExist)
}
}
func TestMetadata(t *testing.T) {
t.Run("wrong version", func(t *testing.T) {
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}},
},
)
// Save data columns into a file.
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)
// Alter the version.
const filePath = "0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs"
file, err := dataColumnStorage.fs.OpenFile(filePath, os.O_WRONLY, os.FileMode(0600))
require.NoError(t, err)
count, err := file.Write([]byte{42})
require.NoError(t, err)
require.Equal(t, 1, count)
// Try to read the metadata.
_, err = dataColumnStorage.metadata(file)
require.ErrorIs(t, err, errWrongVersion)
err = file.Close()
require.NoError(t, err)
})
}
func TestNewStorageIndices(t *testing.T) {
t.Run("wrong number of columns", func(t *testing.T) {
_, err := newStorageIndices(nil)
require.ErrorIs(t, err, errWrongNumberOfColumns)
})
t.Run("nominal", func(t *testing.T) {
var indices [mandatoryNumberOfColumns]byte
indices[0] = 1
storageIndices, err := newStorageIndices(indices[:])
require.NoError(t, err)
require.Equal(t, indices, storageIndices.indices)
})
}
func TestStorageIndicesGet(t *testing.T) {
t.Run("index too large", func(t *testing.T) {
var indices storageIndices
_, _, err := indices.get(1_000_000)
require.ErrorIs(t, err, errDataColumnIndexTooLarge)
})
t.Run("index not set", func(t *testing.T) {
const expected = false
var indices storageIndices
actual, _, err := indices.get(0)
require.NoError(t, err)
require.Equal(t, expected, actual)
})
t.Run("index set", func(t *testing.T) {
const (
expectedOk = true
expectedPosition = int64(3)
)
indices := storageIndices{indices: [mandatoryNumberOfColumns]byte{0, 131}}
actualOk, actualPosition, err := indices.get(1)
require.NoError(t, err)
require.Equal(t, expectedOk, actualOk)
require.Equal(t, expectedPosition, actualPosition)
})
}
func TestStorageIndicesLen(t *testing.T) {
const expectedLen = 2
indices := storageIndices{indices: [mandatoryNumberOfColumns]byte{0, 131, 0, 128}}
actualLen := indices.len()
require.Equal(t, expectedLen, actualLen)
}
func TestStorageIndicesAll(t *testing.T) {
expectedIndices := []uint64{1, 3}
indices := storageIndices{indices: [mandatoryNumberOfColumns]byte{0, 131, 0, 128}}
actualIndices := indices.all()
require.DeepEqual(t, expectedIndices, actualIndices)
}
func TestStorageIndicesSet(t *testing.T) {
t.Run("data column index too large", func(t *testing.T) {
var indices storageIndices
err := indices.set(1_000_000, 0)
require.ErrorIs(t, err, errDataColumnIndexTooLarge)
})
t.Run("position too large", func(t *testing.T) {
var indices storageIndices
err := indices.set(0, 255)
require.ErrorIs(t, err, errDataColumnIndexTooLarge)
})
t.Run("nominal", func(t *testing.T) {
expected := [mandatoryNumberOfColumns]byte{0, 0, 128, 0, 131}
var indices storageIndices
err := indices.set(2, 0)
require.NoError(t, err)
err = indices.set(4, 3)
require.NoError(t, err)
require.Equal(t, expected, indices.indices)
})
}
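// The storageIndices tests above all exercise one sentinel-bit scheme: each byte of the
// 128-byte index holds a presence bit (0x80) plus a 7-bit file position, so 131 = 0x80|3
// means "present at position 3". A minimal dependency-free sketch of that encoding follows;
// the names are illustrative assumptions, not the actual implementation.
const (
	sketchNumColumns  = 128
	sketchPresenceBit = byte(0x80)
	sketchMaxPosition = 127 // only 7 bits remain for the position
)
// encodeIndex marks the column at `index` as stored at file `position`; it reports false
// when either value does not fit the scheme (index >= 128 or position > 127).
func encodeIndex(indices *[sketchNumColumns]byte, index uint64, position byte) bool {
	if index >= sketchNumColumns || position > sketchMaxPosition {
		return false
	}
	indices[index] = sketchPresenceBit | position
	return true
}
// decodeIndex reports whether the column at `index` (which must be < 128) is present
// and, if so, at which file position.
func decodeIndex(indices *[sketchNumColumns]byte, index uint64) (present bool, position byte) {
	b := indices[index]
	return b&sketchPresenceBit != 0, b &^ sketchPresenceBit
}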
func TestPrune(t *testing.T) {
t.Run(("nothing to prune"), func(t *testing.T) {
dir := t.TempDir()
dataColumnStorage, err := NewDataColumnStorage(context.Background(), WithDataColumnBasePath(dir))
require.NoError(t, err)
dataColumnStorage.prune()
})
t.Run("nominal", func(t *testing.T) {
var compareSlices = func(left, right []string) bool {
if len(left) != len(right) {
return false
}
leftMap := make(map[string]bool, len(left))
for _, leftItem := range left {
leftMap[leftItem] = true
}
for _, rightItem := range right {
if _, ok := leftMap[rightItem]; !ok {
return false
}
}
return true
}
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{0}: {
{Slot: 33, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 1
{Slot: 33, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 1
},
{1}: {
{Slot: 128_002, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
{Slot: 128_002, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
},
{2}: {
{Slot: 128_003, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
{Slot: 128_003, ColumnIndex: 3, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
},
{3}: {
{Slot: 131_138, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
{Slot: 131_138, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
},
{4}: {
{Slot: 131_169, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
{Slot: 131_169, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
},
{5}: {
{Slot: 262_144, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 2 - Epoch 8192
{Slot: 262_144, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 2 - Epoch 8192
},
},
)
dir := t.TempDir()
dataColumnStorage, err := NewDataColumnStorage(context.Background(), WithDataColumnBasePath(dir), WithDataColumnRetentionEpochs(10_000))
require.NoError(t, err)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)
dirs, err := listDir(dataColumnStorage.fs, ".")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0", "1", "2"}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "0")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"1", "4000"}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "1")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"4099", "4098"}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "2")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"8192"}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "0/1")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0000000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "0/4000")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{
"0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
"0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "1/4098")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0300000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "1/4099")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0400000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "2/8192")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0500000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
_, verifiedRoDataColumnSidecars = verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{6}: {{Slot: 451_141, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}}, // Period 3 - Epoch 14_098
},
)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)
dataColumnStorage.prune()
dirs, err = listDir(dataColumnStorage.fs, ".")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"1", "2", "3"}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "1")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"4099"}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "2")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"8192"}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "3")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"14098"}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "1/4099")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0400000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "2/8192")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0500000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
dirs, err = listDir(dataColumnStorage.fs, "3/14098")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0600000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
})
}
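// The directory assertions in TestPrune follow from a fixed mapping: epoch = slot / 32 and
// period = epoch / 4096, and each file is named after the 0x-prefixed hex block root plus
// the ".sszs" extension. A dependency-free sketch of that mapping; slotsPerEpoch = 32 is an
// assumed mainnet value and the helper name is hypothetical.
const (
	sketchSlotsPerEpoch   = 32
	sketchEpochsPerPeriod = 4096
)
// epochAndPeriod maps a slot to the epoch and period directory names used in the layout,
// e.g. slot 131_138 -> epoch 4098, period 1, matching the assertions above.
func epochAndPeriod(slot uint64) (epoch, period uint64) {
	epoch = slot / sketchSlotsPerEpoch
	period = epoch / sketchEpochsPerPeriod
	return epoch, period
}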

View File

@@ -1,104 +0,0 @@
package filesystem
// nolint:dupword
/*
Data column sidecars storage documentation
==========================================
File organisation
-----------------
- The first byte represents the version of the file structure (up to 0xff = 255).
We set it to 0x01.
Note: This is not strictly needed, but it will help a lot if, in the future,
we want to modify the file structure.
- The next 4 bytes represent the size of an SSZ encoded data column sidecar.
(See the `Computation of the maximum size of a DataColumnSidecar` section for a description
of how this value is computed.)
- The next 128 bytes represent the position in the file of each column.
The first bit of each byte in the index is set to 0 if there is no data column,
and set to 1 if there is a data column.
The remaining 7 bits (values 0 to 127) represent the position of the data column in the file.
This sentinel bit is needed to distinguish a column stored at position 0 from an absent column.
Example: If the column with index 5 is in the 3rd position in the file, then indices[5] = 0x80 + 0x03 = 0x83.
- The rest of the file is a repeat of the SSZ encoded data column sidecars.
|------------------------------------------|------------------------------------------------------------------------------------|
| Byte offset | Description |
|------------------------------------------|------------------------------------------------------------------------------------|
| 0 | version (1 byte) | sszEncodedDataColumnSidecarSize (4 bytes) | indices (128 bytes) |
|133 + 0*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
|133 + 1*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
|133 + 2*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
| ... | ... |
|133 + 127*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
|------------------------------------------|------------------------------------------------------------------------------------|
Each file is named after the block root the data columns are committed to.
Example: `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`
Database organisation
---------------------
SSZ encoded data column sidecars are stored following the `by-epoch` layout.
- The first layer is a directory corresponding to the `period`, which corresponds to the epoch divided by 4096.
- The second layer is a directory corresponding to the epoch.
- Then all files are stored in the epoch directory.
Example:
data-columns
├── 0
│   ├── 3638
│   │   ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│   │   ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│   │   ├── ...
│   │   ├── 0xeb78e2b2350a71c640f1e96fea9e42f38e65705ab7e6e100c8bc9c589f2c5f2b.sszs
│   │   └── 0xeb7ee68da988fd20d773d45aad01dd62527734367a146e2b048715bd68a4e370.sszs
│   └── 3639
│      ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│      ├── 0x1bf5edff6b6ba2b65b1db325ff3312bbb57da461ef2ae651bd741af851aada3a.sszs
│      ├── ...
│      ├── 0xa156a527e631f858fee79fab7ef1fde3f6117a2e1201d47c09fbab0c6780c937.sszs
│      └── 0xcd80bc535ddc467dea1d19e0c39c1160875ccd1989061bcd8ce206e3c1261c87.sszs
└── 1
├── 4096
│   ├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
│   ├── 0x11f420928d8de41c50e735caab0369996824a5299c5f054e097965855925697d.sszs
│   ├── ...
│   ├── 0xbe91fc782877ed400d95c02c61aebfdd592635d11f8e64c94b46abd84f45c967.sszs
│   └── 0xf246189f078f02d30173ff74605cf31c9e65b5e463275ebdbeb40476638135ff.sszs
└── 4097
   ├── 0x454d000674793c479e90504c0fe9827b50bb176ae022dab4e37d6a21471ab570.sszs
   ├── 0xac5eb7437d7190c48cfa863e3c45f96a7f8af371d47ac12ccda07129a06af763.sszs
   ├── ...
   ├── 0xb7df30561d9d92ab5fafdd96bca8b44526497c8debf0fc425c7a0770b2abeb83.sszs
   └── 0xc1dd0b1ae847b6ec62303a36d08c6a4a2e9e3ec4be3ff70551972a0ee3de9c14.sszs
Computation of the maximum size of a DataColumnSidecar
------------------------------------------------------
https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#datacolumnsidecar
class DataColumnSidecar(Container):
index: ColumnIndex # Index of column in extended matrix
column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
signed_block_header: SignedBeaconBlockHeader
kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH]
- index: 8 bytes (ColumnIndex, uint64)
- `column`: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 64 (FIELD_ELEMENTS_PER_CELL) * 32 bytes (BYTES_PER_FIELD_ELEMENT) = 8,388,608 bytes
- kzg_commitments: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGCommitment) = 196,608 bytes
- kzg_proofs: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGProof) = 196,608 bytes
- signed_block_header: 8 bytes (Slot) + 8 bytes (ValidatorIndex) + 3 * 32 bytes (Root) + 96 bytes (BLSSignature) = 208 bytes
- kzg_commitments_inclusion_proof: 4 (KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH) * 32 bytes = 128 bytes
TOTAL: 8,782,168 bytes = 70,257,344 bits
log(70,257,344) / log(2) ~= 26.07
==> 32 bits (4 bytes) are enough to store the maximum size of a data column sidecar.
A 4-byte size field can represent up to 2**32 bytes = 4,294,967,296 bytes,
which leaves 4,294,967,296 bytes - 8,782,168 bytes ~= 4 gigabytes of room to store the extra data needed by SSZ encoding (which is more than enough).
*/
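// The header layout above (1 version byte + 4 size bytes + 128 index bytes = 133 bytes)
// also pins down where each sidecar lives. A sketch of the offset arithmetic with
// illustrative constant names:
const (
	sketchVersionSize = 1
	sketchSizeSize    = 4
	sketchIndicesSize = 128
	sketchHeaderSize  = sketchVersionSize + sketchSizeSize + sketchIndicesSize // 133 bytes
)
// sidecarOffset returns the byte offset of the sidecar stored at `position`, given the
// fixed SSZ encoded sidecar size recorded in the header, matching the offset table above.
func sidecarOffset(position, sszEncodedSize int64) int64 {
	return sketchHeaderSize + position*sszEncodedSize
}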

View File

@@ -1,6 +1,7 @@
package filesystem
import (
"bytes"
"fmt"
"io"
"math"
@@ -219,6 +220,28 @@ func TestListDir(t *testing.T) {
}
}
func TestSlotFromBlob(t *testing.T) {
cases := []struct {
slot primitives.Slot
}{
{slot: 0},
{slot: 2},
{slot: 1123581321},
{slot: math.MaxUint64},
}
for _, c := range cases {
t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
sc := sidecars[0]
enc, err := sc.MarshalSSZ()
require.NoError(t, err)
slot, err := slotFromBlob(bytes.NewReader(enc))
require.NoError(t, err)
require.Equal(t, c.slot, slot)
})
}
}
func TestIterationComplete(t *testing.T) {
targets := []migrationTestTarget{
{

View File

@@ -1,12 +1,13 @@
package filesystem
import (
"encoding/binary"
"io"
"os"
"path"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/spf13/afero"
)
@@ -114,25 +115,29 @@ func (l *flatLayout) pruneBefore(before primitives.Epoch) (*pruneSummary, error)
// Read slot from marshaled BlobSidecar data in the given file. See slotFromBlob for details.
func slotFromFile(name string, fs afero.Fs) (primitives.Slot, error) {
// Read whole file, try to unmarshal it as data columns sidecar, if it fails, try to unmarshal it as blob sidecar.
content, err := afero.ReadFile(fs, name)
f, err := fs.Open(name)
if err != nil {
return 0, err
}
defer func() {
if err := f.Close(); err != nil {
log.WithError(err).Errorf("Could not close blob file")
}
}()
return slotFromBlob(f)
}
dataColumnSidecar := &ethpb.DataColumnSidecar{}
err = dataColumnSidecar.UnmarshalSSZ(content)
if err == nil {
return dataColumnSidecar.SignedBlockHeader.Header.Slot, nil
// slotFromBlob reads the slot from SSZ encoded BlobSidecar data at a fixed offset
// (8 + 131072 + 48 + 48 = 131176 bytes), computed from the sizes of the BlobSidecar
// fields preceding the slot within SignedBeaconBlockHeader.
func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) {
b := make([]byte, 8)
_, err := at.ReadAt(b, 131176)
if err != nil {
return 0, err
}
blobSidecar := &ethpb.BlobSidecar{}
err = blobSidecar.UnmarshalSSZ(content)
if err == nil {
return blobSidecar.SignedBlockHeader.Header.Slot, nil
}
return 0, errors.New("failed to unmarshal as blob sidecar or data column sidecar")
rawSlot := binary.LittleEndian.Uint64(b)
return primitives.Slot(rawSlot), nil
}
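// The offset used by slotFromBlob is the sum of the fixed-size BlobSidecar fields preceding
// the signed block header, whose first field is the slot; the slot is then the little-endian
// uint64 at that offset. A restatement of the arithmetic with illustrative constant names
// (sizes per the Deneb SSZ layout):
const (
	sketchIndexSize         = 8      // index: uint64
	sketchBlobSize          = 131072 // blob: 4096 field elements * 32 bytes each
	sketchKzgCommitmentSize = 48     // kzg_commitment
	sketchKzgProofSize      = 48     // kzg_proof
	sketchSlotOffset        = sketchIndexSize + sketchBlobSize + sketchKzgCommitmentSize + sketchKzgProofSize // 131176
)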
type flatSlotReader struct {

View File

@@ -6,7 +6,6 @@ import (
)
var (
// Blobs
blobBuckets = []float64{3, 5, 7, 9, 11, 13}
blobSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "blob_storage_save_latency",
@@ -34,29 +33,4 @@ var (
Name: "blob_disk_bytes",
Help: "Approximate number of bytes occupied by blobs in storage",
})
// Data columns
dataColumnBuckets = []float64{3, 5, 7, 9, 11, 13}
dataColumnSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "data_column_storage_save_latency",
Help: "Latency of DataColumnSidecar storage save operations in milliseconds",
Buckets: dataColumnBuckets,
})
dataColumnFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "data_column_storage_get_latency",
Help: "Latency of DataColumnSidecar storage get operations in milliseconds",
Buckets: dataColumnBuckets,
})
dataColumnPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "data_column_pruned",
Help: "Number of DataColumnSidecar pruned.",
})
dataColumnWrittenCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "data_column_written",
Help: "Number of DataColumnSidecar written",
})
dataColumnDiskCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "data_column_disk_count",
Help: "Approximate number of data columns in storage",
})
)

View File

@@ -1,19 +1,14 @@
package filesystem
import (
"context"
"testing"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/spf13/afero"
)
// Blobs
// -----
// NewEphemeralBlobStorage should only be used for tests.
// The instance of BlobStorage returned is backed by an in-memory virtual filesystem,
// improving test performance and simplifying cleanup.
@@ -80,52 +75,3 @@ func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStor
}
return c
}
// Data columns
// ------------
// NewEphemeralDataColumnStorage should only be used for tests.
// The instance of DataColumnStorage returned is backed by an in-memory virtual filesystem,
// improving test performance and simplifying cleanup.
func NewEphemeralDataColumnStorage(t testing.TB, opts ...DataColumnStorageOption) *DataColumnStorage {
return NewWarmedEphemeralDataColumnStorageUsingFs(t, afero.NewMemMapFs(), opts...)
}
// NewEphemeralDataColumnStorageAndFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the DataColumnStorage API.
func NewEphemeralDataColumnStorageAndFs(t testing.TB, opts ...DataColumnStorageOption) (afero.Fs, *DataColumnStorage) {
fs := afero.NewMemMapFs()
dcs := NewWarmedEphemeralDataColumnStorageUsingFs(t, fs, opts...)
return fs, dcs
}
func NewWarmedEphemeralDataColumnStorageUsingFs(t testing.TB, fs afero.Fs, opts ...DataColumnStorageOption) *DataColumnStorage {
bs := NewEphemeralDataColumnStorageUsingFs(t, fs, opts...)
bs.WarmCache()
return bs
}
func NewEphemeralDataColumnStorageUsingFs(t testing.TB, fs afero.Fs, opts ...DataColumnStorageOption) *DataColumnStorage {
opts = append(opts,
WithDataColumnRetentionEpochs(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest),
WithDataColumnFs(fs),
)
bs, err := NewDataColumnStorage(context.Background(), opts...)
if err != nil {
t.Fatal(err)
}
return bs
}
func NewMockDataColumnStorageSummarizer(t *testing.T, set map[[fieldparams.RootLength]byte][]uint64) DataColumnStorageSummarizer {
c := newDataColumnStorageSummaryCache()
for root, indices := range set {
if err := c.set(DataColumnsIdent{Root: root, Epoch: 0, Indices: indices}); err != nil {
t.Fatal(err)
}
}
return c
}

View File

@@ -101,7 +101,7 @@ type NoHeadAccessDatabase interface {
SaveLightClientBootstrap(ctx context.Context, blockRoot []byte, bootstrap interfaces.LightClientBootstrap) error
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot) error
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)
}
// HeadAccessDatabase defines a struct with access to reading chain head data.
@@ -110,6 +110,7 @@ type HeadAccessDatabase interface {
// Block related methods.
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
HeadBlockRoot() ([32]byte, error)
SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
// Genesis operations.

View File

@@ -70,6 +70,21 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
return root, err
}
// HeadBlockRoot returns the latest canonical block root in the Ethereum Beacon Chain.
func (s *Store) HeadBlockRoot() ([32]byte, error) {
var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
headRoot := bkt.Get(headBlockRootKey)
if headRoot == nil {
return errors.New("no head block root found")
}
copy(root[:], headRoot)
return nil
})
return root, err
}
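// A hedged usage sketch of the new accessor pair (SaveHeadBlockRoot from the interface change
// above plus HeadBlockRoot here): persist the head root on update, read it back on startup.
// The helper name is hypothetical.
func headRootRoundTrip(ctx context.Context, s *Store, headRoot [32]byte) ([32]byte, error) {
	if err := s.SaveHeadBlockRoot(ctx, headRoot); err != nil {
		return [32]byte{}, err
	}
	return s.HeadBlockRoot()
}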
// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
func (s *Store) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
@@ -214,7 +229,8 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
defer span.End()
if err := s.DeleteState(ctx, root); err != nil {
return err
// TODO: Find out why invalid states are in the db
log.WithError(err).Error("Could not delete state")
}
if err := s.deleteStateSummary(root); err != nil {
@@ -245,77 +261,82 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
// - blockRootValidatorHashesBucket
// - blockSlotIndicesBucket
// - stateSlotIndicesBucket
func (s *Store) DeleteHistoricalDataBeforeSlot(ctx context.Context, cutoffSlot primitives.Slot) error {
func (s *Store) DeleteHistoricalDataBeforeSlot(ctx context.Context, cutoffSlot primitives.Slot, batchSize int) (int, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.DeleteHistoricalDataBeforeSlot")
defer span.End()
// Collect slot/root pairs to perform deletions in a separate read only transaction.
var (
roots [][]byte
slts []primitives.Slot
)
err := s.db.View(func(tx *bolt.Tx) error {
var err error
roots, slts, err = blockRootsBySlotRange(ctx, tx.Bucket(blockSlotIndicesBucket), primitives.Slot(0), cutoffSlot, nil, nil, nil)
if err != nil {
return errors.Wrap(err, "could not retrieve block roots")
}
return nil
})
slotRoots, err := s.slotRootsInRange(ctx, primitives.Slot(0), cutoffSlot, batchSize)
if err != nil {
return errors.Wrap(err, "could not retrieve block roots and slots")
return 0, err
}
// Return early if there's nothing to delete.
if len(slotRoots) == 0 {
return 0, nil
}
// Perform all deletions in a single transaction for atomicity
return s.db.Update(func(tx *bolt.Tx) error {
for _, root := range roots {
var numSlotsDeleted int
err = s.db.Update(func(tx *bolt.Tx) error {
for _, sr := range slotRoots {
// Return if context is cancelled or deadline is exceeded.
if ctx.Err() != nil {
//nolint:nilerr
return nil
}
// Delete block
if err = s.deleteBlock(tx, root); err != nil {
if err = s.deleteBlock(tx, sr.root[:]); err != nil {
return err
}
// Delete finalized block roots index
if err = tx.Bucket(finalizedBlockRootsIndexBucket).Delete(root); err != nil {
if err = tx.Bucket(finalizedBlockRootsIndexBucket).Delete(sr.root[:]); err != nil {
return errors.Wrap(err, "could not delete finalized block root index")
}
// Delete state
if err = tx.Bucket(stateBucket).Delete(root); err != nil {
if err = tx.Bucket(stateBucket).Delete(sr.root[:]); err != nil {
return errors.Wrap(err, "could not delete state")
}
// Delete state summary
if err = tx.Bucket(stateSummaryBucket).Delete(root); err != nil {
if err = tx.Bucket(stateSummaryBucket).Delete(sr.root[:]); err != nil {
return errors.Wrap(err, "could not delete state summary")
}
// Delete validator entries
if err = s.deleteValidatorHashes(tx, root); err != nil {
if err = s.deleteValidatorHashes(tx, sr.root[:]); err != nil {
return errors.Wrap(err, "could not delete validators")
}
numSlotsDeleted++
}
for _, slot := range slts {
for _, sr := range slotRoots {
// Delete slot indices
if err = tx.Bucket(blockSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(slot)); err != nil {
if err = tx.Bucket(blockSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(sr.slot)); err != nil {
return errors.Wrap(err, "could not delete block slot index")
}
if err = tx.Bucket(stateSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(slot)); err != nil {
if err = tx.Bucket(stateSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(sr.slot)); err != nil {
return errors.Wrap(err, "could not delete state slot index")
}
}
// Delete all caches after we have deleted everything from buckets.
// This is done after the buckets are deleted to avoid any issues in case of transaction rollback.
for _, root := range roots {
for _, sr := range slotRoots {
// Delete block from cache
s.blockCache.Del(string(root))
s.blockCache.Del(string(sr.root[:]))
// Delete state summary from cache
s.stateSummaryCache.delete([32]byte(root))
s.stateSummaryCache.delete(sr.root)
}
return nil
})
return numSlotsDeleted, err
}
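// A hedged sketch of the batch-driven calling pattern the new signature enables: loop until
// the store reports nothing left to delete. Batch size 32 mirrors the pruner's
// defaultPrunableBatchSize; the helper name is hypothetical (see pruneBatches further down
// for the real, time-boxed loop).
func deleteAllBefore(ctx context.Context, s *Store, cutoffSlot primitives.Slot) error {
	for {
		deleted, err := s.DeleteHistoricalDataBeforeSlot(ctx, cutoffSlot, 32)
		if err != nil {
			return err
		}
		if deleted == 0 {
			return nil // nothing older than cutoffSlot remains
		}
	}
}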
// SaveBlock to the db.
@@ -336,7 +357,7 @@ func (s *Store) SaveBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
// if a `saveBlindedBeaconBlocks` key exists in the database. Otherwise, we check if the last
// blocked stored to check if it is blinded, and then write that `saveBlindedBeaconBlocks` key
// to the DB for future checks.
func (s *Store) shouldSaveBlinded(ctx context.Context) (bool, error) {
func (s *Store) shouldSaveBlinded() (bool, error) {
var saveBlinded bool
if err := s.db.View(func(tx *bolt.Tx) error {
metadataBkt := tx.Bucket(chainMetadataBucket)
@@ -398,7 +419,7 @@ func prepareBlockBatch(blks []blocks.ROBlock, shouldBlind bool) ([]blockBatchEnt
}
func (s *Store) SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error {
shouldBlind, err := s.shouldSaveBlinded(ctx)
shouldBlind, err := s.shouldSaveBlinded()
if err != nil {
return err
}
@@ -669,6 +690,49 @@ func (s *Store) SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primi
})
}
type slotRoot struct {
slot primitives.Slot
root [32]byte
}
// slotRootsInRange returns slot and block root pairs in the range [start, end], walking the slot index backward from end and stopping once at least batchSize pairs have been collected.
func (s *Store) slotRootsInRange(ctx context.Context, start, end primitives.Slot, batchSize int) ([]slotRoot, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.slotRootsInRange")
defer span.End()
if end < start {
return nil, errInvalidSlotRange
}
var pairs []slotRoot
key := bytesutil.SlotToBytesBigEndian(end)
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blockSlotIndicesBucket)
c := bkt.Cursor()
for k, v := c.Seek(key); k != nil; k, v = c.Prev() {
slot := bytesutil.BytesToSlotBigEndian(k)
if slot > end {
continue // Seek will seek to the next key *after* the given one if not present
}
if slot < start {
return nil
}
roots, err := splitRoots(v)
if err != nil {
return errors.Wrapf(err, "corrupt value %v in block slot index for slot=%d", v, slot)
}
for _, r := range roots {
pairs = append(pairs, slotRoot{slot: slot, root: r})
}
if len(pairs) >= batchSize {
return nil // allows code to easily cap the number of items pruned
}
}
return nil
})
return pairs, err
}
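// slotRootsInRange depends on the block slot index storing every root for a slot as one
// concatenated value. A minimal sketch of what a splitRoots-style helper does, assuming
// 32-byte roots (the real splitRoots may differ in details such as error wrapping):
func splitConcatenatedRoots(v []byte) ([][32]byte, error) {
	if len(v)%32 != 0 {
		return nil, errors.New("value length is not a multiple of 32")
	}
	roots := make([][32]byte, 0, len(v)/32)
	for i := 0; i < len(v); i += 32 {
		var r [32]byte
		copy(r[:], v[i:i+32])
		roots = append(roots, r)
	}
	return roots, nil
}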
// blockRootsByFilter retrieves the block roots given the filter criteria.
func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter) ([][]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsByFilter")
@@ -689,7 +753,7 @@ func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter
// We retrieve block roots that match a filter criteria of slot ranges, if specified.
filtersMap := f.Filters()
rootsBySlotRange, _, err := blockRootsBySlotRange(
rootsBySlotRange, err := blockRootsBySlotRange(
ctx,
tx.Bucket(blockSlotIndicesBucket),
filtersMap[filters.StartSlot],
@@ -734,13 +798,13 @@ func blockRootsBySlotRange(
ctx context.Context,
bkt *bolt.Bucket,
startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded interface{},
) ([][]byte, []primitives.Slot, error) {
) ([][]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
defer span.End()
// Return nothing when all slot parameters are missing
if startSlotEncoded == nil && endSlotEncoded == nil && startEpochEncoded == nil && endEpochEncoded == nil {
return [][]byte{}, nil, nil
return [][]byte{}, nil
}
var startSlot, endSlot primitives.Slot
@@ -761,11 +825,11 @@ func blockRootsBySlotRange(
if startEpochOk && endEpochOk {
startSlot, err = slots.EpochStart(startEpoch)
if err != nil {
return nil, nil, err
return nil, err
}
endSlot, err = slots.EpochStart(endEpoch)
if err != nil {
return nil, nil, err
return nil, err
}
endSlot = endSlot + params.BeaconConfig().SlotsPerEpoch - 1
}
@@ -776,11 +840,10 @@ func blockRootsBySlotRange(
return key != nil && bytes.Compare(key, max) <= 0
}
if endSlot < startSlot {
return nil, nil, errInvalidSlotRange
return nil, errInvalidSlotRange
}
rootsRange := endSlot.SubSlot(startSlot).Div(step)
roots := make([][]byte, 0, rootsRange)
var slts []primitives.Slot
c := bkt.Cursor()
for k, v := c.Seek(min); conditional(k, max); k, v = c.Next() {
slot := bytesutil.BytesToSlotBigEndian(k)
@@ -795,9 +858,8 @@ func blockRootsBySlotRange(
splitRoots = append(splitRoots, v[i:i+32])
}
roots = append(roots, splitRoots...)
slts = append(slts, slot)
}
return roots, slts, nil
return roots, nil
}
// blockRootsBySlot retrieves the block roots by slot

View File

@@ -359,184 +359,221 @@ func TestStore_DeleteFinalizedBlock(t *testing.T) {
func TestStore_HistoricalDataBeforeSlot(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
db := setupDB(t)
ctx := context.Background()
// Save genesis block root
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
tests := []struct {
name string
batchSize int
numOfEpochs uint64
deleteBeforeSlot uint64
}{
{
name: "batchSize less than delete range",
batchSize: 10,
numOfEpochs: 4,
deleteBeforeSlot: 25,
},
{
name: "batchSize greater than delete range",
batchSize: 30,
numOfEpochs: 4,
deleteBeforeSlot: 15,
},
}
// Create and save blocks for 4 epochs
blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
// Save genesis block root
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
// Mark state validator migration as complete
err := db.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(migrationsBucket).Put(migrationStateValidatorsKey, migrationCompleted)
})
require.NoError(t, err)
// Create and save blocks for given epochs
blks := makeBlocks(t, 0, slotsPerEpoch*tt.numOfEpochs, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
migrated, err := db.isStateValidatorMigrationOver()
require.NoError(t, err)
require.Equal(t, true, migrated)
// Mark state validator migration as complete
err := db.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(migrationsBucket).Put(migrationStateValidatorsKey, migrationCompleted)
})
require.NoError(t, err)
// Create state summaries and states for each block
ss := make([]*ethpb.StateSummary, len(blks))
states := make([]state.BeaconState, len(blks))
migrated, err := db.isStateValidatorMigrationOver()
require.NoError(t, err)
require.Equal(t, true, migrated)
for i, blk := range blks {
slot := blk.Block().Slot()
r, err := blk.Block().HashTreeRoot()
require.NoError(t, err)
// Create state summaries and states for each block
ss := make([]*ethpb.StateSummary, len(blks))
states := make([]state.BeaconState, len(blks))
// Create and save state summary
ss[i] = &ethpb.StateSummary{
Slot: slot,
Root: r[:],
}
for i, blk := range blks {
slot := blk.Block().Slot()
r, err := blk.Block().HashTreeRoot()
require.NoError(t, err)
// Create and save state with validator entries
vals := make([]*ethpb.Validator, 2)
for j := range vals {
vals[j] = &ethpb.Validator{
PublicKey: bytesutil.PadTo([]byte{byte(i*j + 1)}, 48),
WithdrawalCredentials: bytesutil.PadTo([]byte{byte(i*j + 2)}, 32),
// Create and save state summary
ss[i] = &ethpb.StateSummary{
Slot: slot,
Root: r[:],
}
// Create and save state with validator entries
vals := make([]*ethpb.Validator, 2)
for j := range vals {
vals[j] = &ethpb.Validator{
PublicKey: bytesutil.PadTo([]byte{byte(i*j + 1)}, 48),
WithdrawalCredentials: bytesutil.PadTo([]byte{byte(i*j + 2)}, 32),
}
}
st, err := util.NewBeaconState(func(state *ethpb.BeaconState) error {
state.Validators = vals
state.Slot = slot
return nil
})
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, st, r))
states[i] = st
// Verify validator entries are saved to db
valsActual, err := db.validatorEntries(ctx, r)
require.NoError(t, err)
for j, val := range valsActual {
require.DeepEqual(t, vals[j], val)
}
}
}
require.NoError(t, db.SaveStateSummaries(ctx, ss))
st, err := util.NewBeaconState(func(state *ethpb.BeaconState) error {
state.Validators = vals
state.Slot = slot
return nil
})
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, st, r))
states[i] = st
// Verify validator entries are saved to db
valsActual, err := db.validatorEntries(ctx, r)
require.NoError(t, err)
for j, val := range valsActual {
require.DeepEqual(t, vals[j], val)
}
}
require.NoError(t, db.SaveStateSummaries(ctx, ss))
// Verify slot indices exist before deletion
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
for i := uint64(0); i < slotsPerEpoch; i++ {
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist", i)
}
return nil
})
require.NoError(t, err)
// Delete data before slot at epoch 1
require.NoError(t, db.DeleteHistoricalDataBeforeSlot(ctx, primitives.Slot(slotsPerEpoch)))
// Verify blocks from epoch 0 are deleted
for i := uint64(0); i < slotsPerEpoch; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
// Check block is deleted
retrievedBlocks, err := db.BlocksBySlot(ctx, primitives.Slot(i))
require.NoError(t, err)
assert.Equal(t, 0, len(retrievedBlocks))
// Verify block does not exist
assert.Equal(t, false, db.HasBlock(ctx, root))
// Verify block parent root does not exist
err = db.db.View(func(tx *bolt.Tx) error {
require.Equal(t, 0, len(tx.Bucket(blockParentRootIndicesBucket).Get(root[:])))
return nil
})
require.NoError(t, err)
// Verify state is deleted
hasState := db.HasState(ctx, root)
assert.Equal(t, false, hasState)
// Verify state summary is deleted
hasSummary := db.HasStateSummary(ctx, root)
assert.Equal(t, false, hasSummary)
// Verify validator hashes for block roots are deleted
err = db.db.View(func(tx *bolt.Tx) error {
assert.Equal(t, 0, len(tx.Bucket(blockRootValidatorHashesBucket).Get(root[:])))
return nil
})
require.NoError(t, err)
}
// Verify slot indices are deleted
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
for i := uint64(0); i < slotsPerEpoch; i++ {
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.Equal(t, 0, len(blockSlotBkt.Get(slot)), fmt.Sprintf("Expected block slot index to be deleted, slot: %d", slot))
assert.Equal(t, 0, len(stateSlotBkt.Get(slot)), fmt.Sprintf("Expected state slot index to be deleted, slot: %d", slot))
}
return nil
})
require.NoError(t, err)
// Verify blocks from epochs 1-3 still exist
for i := slotsPerEpoch; i < slotsPerEpoch*4; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
// Verify block exists
assert.Equal(t, true, db.HasBlock(ctx, root))
// Verify remaining block parent root exists, except last slot since we store parent roots of each block.
if i < slotsPerEpoch*4-1 {
// Verify slot indices exist before deletion
err = db.db.View(func(tx *bolt.Tx) error {
require.NotNil(t, tx.Bucket(blockParentRootIndicesBucket).Get(root[:]), fmt.Sprintf("Expected block parent index to be deleted, slot: %d", i))
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
for i := uint64(0); i < uint64(tt.deleteBeforeSlot); i++ {
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist", i)
}
return nil
})
require.NoError(t, err)
}
// Verify state exists
hasState := db.HasState(ctx, root)
assert.Equal(t, true, hasState)
// Delete data before slot
slotsDeleted, err := db.DeleteHistoricalDataBeforeSlot(ctx, primitives.Slot(tt.deleteBeforeSlot), tt.batchSize)
require.NoError(t, err)
// Verify state summary exists
hasSummary := db.HasStateSummary(ctx, root)
assert.Equal(t, true, hasSummary)
var startSlotDeleted, endSlotDeleted uint64
if tt.batchSize >= int(tt.deleteBeforeSlot) {
startSlotDeleted = 1
endSlotDeleted = tt.deleteBeforeSlot
} else {
startSlotDeleted = tt.deleteBeforeSlot - uint64(tt.batchSize) + 1
endSlotDeleted = tt.deleteBeforeSlot
}
// Verify slot indices still exist
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
require.Equal(t, endSlotDeleted-startSlotDeleted+1, uint64(slotsDeleted))
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist")
return nil
// Verify blocks before given slot/batch are deleted
for i := startSlotDeleted; i < endSlotDeleted; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
// Check block is deleted
retrievedBlocks, err := db.BlocksBySlot(ctx, primitives.Slot(i))
require.NoError(t, err)
assert.Equal(t, 0, len(retrievedBlocks), fmt.Sprintf("Expected %d blocks, got %d for slot %d", 0, len(retrievedBlocks), i))
// Verify block does not exist
assert.Equal(t, false, db.HasBlock(ctx, root), fmt.Sprintf("Expected block index to not exist for slot %d", i))
// Verify block parent root does not exist
err = db.db.View(func(tx *bolt.Tx) error {
require.Equal(t, 0, len(tx.Bucket(blockParentRootIndicesBucket).Get(root[:])))
return nil
})
require.NoError(t, err)
// Verify state is deleted
hasState := db.HasState(ctx, root)
assert.Equal(t, false, hasState)
// Verify state summary is deleted
hasSummary := db.HasStateSummary(ctx, root)
assert.Equal(t, false, hasSummary)
// Verify validator hashes for block roots are deleted
err = db.db.View(func(tx *bolt.Tx) error {
assert.Equal(t, 0, len(tx.Bucket(blockRootValidatorHashesBucket).Get(root[:])))
return nil
})
require.NoError(t, err)
}
// Verify slot indices are deleted
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
for i := startSlotDeleted; i < endSlotDeleted; i++ {
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.Equal(t, 0, len(blockSlotBkt.Get(slot)), fmt.Sprintf("Expected block slot index to be deleted, slot: %d", slot))
assert.Equal(t, 0, len(stateSlotBkt.Get(slot)), fmt.Sprintf("Expected state slot index to be deleted, slot: %d", slot))
}
return nil
})
require.NoError(t, err)
// Verify blocks from endSlotDeleted through the last saved epoch still exist
for i := endSlotDeleted; i < slotsPerEpoch*tt.numOfEpochs; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
// Verify block exists
assert.Equal(t, true, db.HasBlock(ctx, root))
// Verify the remaining block parent root exists, except for the last slot, since the parent root index only has entries for roots that are some block's parent.
if i < slotsPerEpoch*tt.numOfEpochs-1 {
err = db.db.View(func(tx *bolt.Tx) error {
require.NotNil(t, tx.Bucket(blockParentRootIndicesBucket).Get(root[:]), fmt.Sprintf("Expected block parent index to exist, slot: %d", i))
return nil
})
require.NoError(t, err)
}
// Verify state exists
hasState := db.HasState(ctx, root)
assert.Equal(t, true, hasState)
// Verify state summary exists
hasSummary := db.HasStateSummary(ctx, root)
assert.Equal(t, true, hasSummary)
// Verify slot indices still exist
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist")
return nil
})
require.NoError(t, err)
// Verify validator entries still exist
valsActual, err := db.validatorEntries(ctx, root)
require.NoError(t, err)
assert.NotNil(t, valsActual)
// Verify remaining validator hashes for block roots exists
err = db.db.View(func(tx *bolt.Tx) error {
assert.NotNil(t, tx.Bucket(blockRootValidatorHashesBucket).Get(root[:]))
return nil
})
require.NoError(t, err)
}
})
require.NoError(t, err)
// Verify validator entries still exist
valsActual, err := db.validatorEntries(ctx, root)
require.NoError(t, err)
assert.NotNil(t, valsActual)
// Verify remaining validator hashes for block roots exists
err = db.db.View(func(tx *bolt.Tx) error {
assert.NotNil(t, tx.Bucket(blockRootValidatorHashesBucket).Get(root[:]))
return nil
})
require.NoError(t, err)
}
}
func TestStore_GenesisBlock(t *testing.T) {

View File

@@ -820,30 +820,25 @@ func (s *Store) slotByBlockRoot(ctx context.Context, tx *bolt.Tx, blockRoot []by
// no need to construct the validator entries as it is not used here.
s, err := s.unmarshalState(ctx, enc, nil)
if err != nil {
return 0, err
return 0, errors.Wrap(err, "could not unmarshal state")
}
if s == nil || s.IsNil() {
return 0, errors.New("state can't be nil")
}
return s.Slot(), nil
}
b := &ethpb.SignedBeaconBlock{}
err := decode(ctx, enc, b)
b, err := unmarshalBlock(ctx, enc)
if err != nil {
return 0, errors.Wrap(err, "could not unmarshal block")
}
if err := blocks.BeaconBlockIsNil(b); err != nil {
return 0, err
}
wsb, err := blocks.NewSignedBeaconBlock(b)
if err != nil {
return 0, err
}
if err := blocks.BeaconBlockIsNil(wsb); err != nil {
return 0, err
}
return b.Block.Slot, nil
return b.Block().Slot(), nil
}
stateSummary := &ethpb.StateSummary{}
if err := decode(ctx, enc, stateSummary); err != nil {
return 0, err
return 0, errors.Wrap(err, "could not unmarshal state summary")
}
return stateSummary.Slot, nil
}

View File

@@ -5,7 +5,6 @@ import (
"crypto/rand"
"encoding/binary"
mathRand "math/rand"
"strconv"
"testing"
"time"
@@ -1070,6 +1069,31 @@ func TestBellatrixState_CanDelete(t *testing.T) {
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}
func TestBellatrixState_CanDeleteWithBlock(t *testing.T) {
db := setupDB(t)
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 100
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wsb))
require.Equal(t, false, db.HasState(context.Background(), r))
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(context.Background(), st, r))
require.Equal(t, true, db.HasState(context.Background(), r))
require.NoError(t, db.DeleteState(context.Background(), r))
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}
func TestDenebState_CanSaveRetrieve(t *testing.T) {
db := setupDB(t)

View File

@@ -16,6 +16,15 @@ import (
var log = logrus.WithField("prefix", "db-pruner")
const (
// defaultPrunableBatchSize is the number of slots that can be pruned at once.
defaultPrunableBatchSize = 32
// defaultPruningWindow is the duration of one pruning window.
defaultPruningWindow = time.Second * 3
// defaultNumBatchesToPrune is the number of batches to prune in one pruning window.
defaultNumBatchesToPrune = 15
)
type ServiceOption func(*Service)
// WithRetentionPeriod allows the user to specify a different data retention period than the spec default.
@@ -143,14 +152,17 @@ func (p *Service) prune(slot primitives.Slot) error {
}).Debug("Pruning chain data")
tt := time.Now()
if err := p.db.DeleteHistoricalDataBeforeSlot(p.ctx, pruneUpto); err != nil {
return errors.Wrapf(err, "could not delete upto slot %d", pruneUpto)
numBatches, err := p.pruneBatches(pruneUpto)
if err != nil {
return errors.Wrap(err, "failed to prune batches")
}
log.WithFields(logrus.Fields{
"prunedUpto": pruneUpto,
"duration": time.Since(tt),
"currentSlot": slot,
"batchSize": defaultPrunableBatchSize,
"numBatches": numBatches,
}).Debug("Successfully pruned chain data")
// Update pruning checkpoint.
@@ -159,6 +171,33 @@ func (p *Service) prune(slot primitives.Slot) error {
return nil
}
func (p *Service) pruneBatches(pruneUpto primitives.Slot) (int, error) {
ctx, cancel := context.WithTimeout(p.ctx, defaultPruningWindow)
defer cancel()
numBatches := 0
for {
select {
case <-ctx.Done():
return numBatches, nil
default:
for i := 0; i < defaultNumBatchesToPrune; i++ {
slotsDeleted, err := p.db.DeleteHistoricalDataBeforeSlot(ctx, pruneUpto, defaultPrunableBatchSize)
if err != nil {
return 0, errors.Wrapf(err, "could not delete up to slot %d", pruneUpto)
}
// Return if there's nothing to delete.
if slotsDeleted == 0 {
return numBatches, nil
}
numBatches++
}
}
}
}
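// Back-of-the-envelope bound implied by the constants above: each pass of the inner loop
// deletes at most defaultNumBatchesToPrune * defaultPrunableBatchSize = 15 * 32 = 480 slots,
// and passes repeat until defaultPruningWindow (3s) expires or a batch comes back empty.
// Illustrative constant only, not part of the service:
const maxSlotsPerPass = defaultNumBatchesToPrune * defaultPrunableBatchSize // 480 slots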
// pruneStartSlotFunc returns the function to determine the start slot to start pruning.
func pruneStartSlotFunc(retentionEpochs primitives.Epoch) func(primitives.Slot) primitives.Slot {
return func(current primitives.Slot) primitives.Slot {

View File

@@ -25,14 +25,12 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/execution/types:go_default_library",
@@ -99,7 +97,6 @@ go_test(
embed = [":go_default_library"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",

View File

@@ -13,8 +13,6 @@ import (
gethRPC "github.com/ethereum/go-ethereum/rpc"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -46,16 +44,11 @@ var (
GetPayloadMethodV3,
GetPayloadBodiesByHashV1,
GetPayloadBodiesByRangeV1,
GetBlobsV1,
}
electraEngineEndpoints = []string{
NewPayloadMethodV4,
GetPayloadMethodV4,
}
fuluEngineEndpoints = []string{
GetPayloadMethodV5,
GetBlobsV2,
}
)
const (
@@ -80,8 +73,6 @@ const (
GetPayloadMethodV3 = "engine_getPayloadV3"
// GetPayloadMethodV4 is the get payload method added for electra
GetPayloadMethodV4 = "engine_getPayloadV4"
// GetPayloadMethodV5 is the get payload method added for fulu
GetPayloadMethodV5 = "engine_getPayloadV5"
// BlockByHashMethod request string for JSON-RPC.
BlockByHashMethod = "eth_getBlockByHash"
// BlockByNumberMethod request string for JSON-RPC.
@@ -94,15 +85,8 @@ const (
ExchangeCapabilities = "engine_exchangeCapabilities"
// GetBlobsV1 request string for JSON-RPC.
GetBlobsV1 = "engine_getBlobsV1"
// GetBlobsV2 request string for JSON-RPC.
GetBlobsV2 = "engine_getBlobsV2"
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
// TODO: Remove this temporarily needed hack. Geth takes as input blob txs with blob proofs and
// does the heavy lifting of building cell proofs, while normally this is done by the tx sender.
// The hack is convenient because it lets the CL act as if the tx sender had actually computed the cell proofs.
// The only downside is that `engine_getPayloadV<x>` takes a lot of time.
// defaultEngineTimeout = time.Second
defaultEngineTimeout = 2 * time.Second
defaultEngineTimeout = time.Second
)
var errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
@@ -124,7 +108,6 @@ type Reconstructor interface {
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error)
}
// EngineCaller defines a client that can interact with an Ethereum
@@ -274,9 +257,6 @@ func (s *Service) ForkchoiceUpdated(
func getPayloadMethodAndMessage(slot primitives.Slot) (string, proto.Message) {
pe := slots.ToEpoch(slot)
if pe >= params.BeaconConfig().FuluForkEpoch {
return GetPayloadMethodV5, &pb.ExecutionBundleElectra{}
}
if pe >= params.BeaconConfig().ElectraForkEpoch {
return GetPayloadMethodV4, &pb.ExecutionBundleElectra{}
}
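With the Fulu branch removed, getPayloadMethodAndMessage checks forks from newest to oldest and returns the first engine method the slot's epoch qualifies for. A minimal sketch of that dispatch; the epoch parameters are illustrative stand-ins for the params.BeaconConfig() lookups, and the pre-Deneb fallback is assumed to be V2.

// payloadMethodForEpoch mirrors the newest-fork-first dispatch above.
// The epoch arguments are hypothetical; the real code reads them from
// params.BeaconConfig().
func payloadMethodForEpoch(epoch, electraEpoch, denebEpoch uint64) string {
	switch {
	case epoch >= electraEpoch:
		return "engine_getPayloadV4" // GetPayloadMethodV4
	case epoch >= denebEpoch:
		return "engine_getPayloadV3" // GetPayloadMethodV3
	default:
		return "engine_getPayloadV2"
	}
}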
@@ -309,7 +289,7 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
}
res, err := blocks.NewGetPayloadResponse(result)
if err != nil {
return nil, errors.Wrap(err, "new get payload response")
return nil, err
}
return res, nil
}
@@ -322,9 +302,6 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
if params.ElectraEnabled() {
supportedEngineEndpoints = append(supportedEngineEndpoints, electraEngineEndpoints...)
}
if params.FuluEnabled() {
supportedEngineEndpoints = append(supportedEngineEndpoints, fuluEngineEndpoints...)
}
var result []string
err := s.rpcClient.CallContext(ctx, &result, ExchangeCapabilities, supportedEngineEndpoints)
if err != nil {
@@ -518,10 +495,9 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProof, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobs")
defer span.End()
// If the execution engine does not support `GetBlobsV1`, return early to prevent encountering an error later.
if !s.capabilityCache.has(GetBlobsV1) {
return nil, fmt.Errorf("%s is not supported", GetBlobsV1)
return nil, nil
}
result := make([]*pb.BlobAndProof, len(versionedHashes))
@@ -529,19 +505,6 @@ func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) (
return result, handleRPCError(err)
}
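The change above makes GetBlobs degrade gracefully: when engine_exchangeCapabilities did not advertise engine_getBlobsV1, the call now returns (nil, nil) instead of an error. A hedged sketch of that capability-gating pattern, using go-ethereum's rpc client directly; the capability set and the blobAndProof type are simplified stand-ins, not the real pb.BlobAndProof.

package execution

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	gethRPC "github.com/ethereum/go-ethereum/rpc"
)

// blobAndProof is a simplified stand-in for pb.BlobAndProof.
type blobAndProof struct {
	Blob     []byte `json:"blob"`
	KzgProof []byte `json:"proof"`
}

// getBlobsIfSupported only issues the call when the EL advertised the
// endpoint; otherwise it returns (nil, nil), matching the diff above.
func getBlobsIfSupported(ctx context.Context, c *gethRPC.Client, caps map[string]struct{}, hashes []common.Hash) ([]*blobAndProof, error) {
	if _, ok := caps["engine_getBlobsV1"]; !ok {
		return nil, nil
	}
	result := make([]*blobAndProof, len(hashes))
	err := c.CallContext(ctx, &result, "engine_getBlobsV1", hashes)
	return result, err
}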
func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProofV2, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV2")
defer span.End()
if !s.capabilityCache.has(GetBlobsV2) {
return nil, fmt.Errorf("%s is not supported", GetBlobsV2)
}
result := make([]*pb.BlobAndProofV2, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)
return result, handleRPCError(err)
}
// ReconstructFullBlock takes in a blinded beacon block and reconstructs
// a beacon block with a full execution payload via the engine API.
func (s *Service) ReconstructFullBlock(
@@ -652,84 +615,6 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
return verifiedBlobs, nil
}
// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
// and constructs the corresponding verified read-only data column sidecars.
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error) {
blockBody := block.Block().Body()
kzgCommitments, err := blockBody.BlobKzgCommitments()
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "blob KZG commitments")
}
// Collect KZG hashes for all blobs
var kzgHashes []common.Hash
for _, commitment := range kzgCommitments {
kzgHashes = append(kzgHashes, primitives.ConvertKzgCommitmentToVersionedHash(commitment))
}
// Fetch all blobsAndCellsProofs from EL
blobAndProofV2s, err := s.GetBlobsV2(ctx, kzgHashes)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "get blobs V2")
}
var cellsAndProofs []kzg.CellsAndProofs
for _, blobAndProof := range blobAndProofV2s {
if blobAndProof == nil {
return nil, wrapWithBlockRoot(errors.New("unable to reconstruct data column sidecars, did not get all blobs from EL"), blockRoot, "")
}
var blob kzg.Blob
copy(blob[:], blobAndProof.Blob)
cells, err := kzg.ComputeCells(&blob)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "could not compute cells")
}
proofs := make([]kzg.Proof, len(blobAndProof.KzgProofs))
for i, proof := range blobAndProof.KzgProofs {
proofs[i] = kzg.Proof(proof)
}
cellsAndProofs = append(cellsAndProofs, kzg.CellsAndProofs{
Cells: cells,
Proofs: proofs,
})
}
header, err := block.Header()
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "could not get header")
}
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "could not get Merkle proof for KZG commitments")
}
dataColumnSidecars, err := peerdas.DataColumnSidecarsForReconstruct(kzgCommitments, header, kzgCommitmentsInclusionProof, cellsAndProofs)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "could not reconstruct data column sidecars")
}
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, len(dataColumnSidecars))
for i, dataColumnSidecar := range dataColumnSidecars {
roDataColumn, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "new read-only data column with root")
}
verifiedRODataColumns[i] = blocks.NewVerifiedRODataColumn(roDataColumn)
}
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", blockRoot),
"slot": block.Block().Slot(),
}).Debug("Data columns successfully reconstructed from EL")
return verifiedRODataColumns, nil
}
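The removed reconstruction path converts each EL blob into extended cells with kzg.ComputeCells and pairs them with the EL-provided cell proofs. For reference, the core of that conversion, isolated as a sketch against the kzg package used above; blobAndProofV2 is a simplified stand-in for pb.BlobAndProofV2.

package execution

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
)

// blobAndProofV2 is a simplified stand-in for pb.BlobAndProofV2.
type blobAndProofV2 struct {
	Blob      []byte
	KzgProofs [][]byte
}

// toCellsAndProofs mirrors the conversion loop in the removed
// ReconstructDataColumnSidecars: compute cells from each blob and adopt
// the EL-provided cell proofs.
func toCellsAndProofs(resp []*blobAndProofV2) ([]kzg.CellsAndProofs, error) {
	out := make([]kzg.CellsAndProofs, 0, len(resp))
	for _, bp := range resp {
		if bp == nil {
			return nil, errors.New("missing blob from EL")
		}
		var blob kzg.Blob
		copy(blob[:], bp.Blob)
		cells, err := kzg.ComputeCells(&blob)
		if err != nil {
			return nil, errors.Wrap(err, "compute cells")
		}
		proofs := make([]kzg.Proof, len(bp.KzgProofs))
		for i, p := range bp.KzgProofs {
			proofs[i] = kzg.Proof(p)
		}
		out = append(out, kzg.CellsAndProofs{Cells: cells, Proofs: proofs})
	}
	return out, nil
}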
func fullPayloadFromPayloadBody(
header interfaces.ExecutionData, body *pb.ExecutionPayloadBody, bVersion int,
) (interfaces.ExecutionData, error) {
@@ -1017,8 +902,3 @@ func toBlockNumArg(number *big.Int) string {
}
return hexutil.EncodeBig(number)
}
// wrapWithBlockRoot returns a new error with the given block root.
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
}

View File

@@ -20,7 +20,6 @@ import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
mocks "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
@@ -318,11 +317,11 @@ func TestClient_HTTP(t *testing.T) {
require.DeepEqual(t, uint64(2), g)
commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, commitments, resp.BlobsBundle.GetKzgCommitments())
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, proofs, resp.BlobsBundle.GetProofs())
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
require.DeepEqual(t, blobs, resp.BlobsBundle.GetBlobs())
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
})
t.Run(GetPayloadMethodV4, func(t *testing.T) {
payloadId := [8]byte{1}
@@ -373,11 +372,11 @@ func TestClient_HTTP(t *testing.T) {
require.DeepEqual(t, uint64(2), g)
commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, commitments, resp.BlobsBundle.GetKzgCommitments())
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, proofs, resp.BlobsBundle.GetProofs())
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
require.DeepEqual(t, blobs, resp.BlobsBundle.GetBlobs())
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
requests := &pb.ExecutionRequests{
Deposits: []*pb.DepositRequest{
{
@@ -2425,7 +2424,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
t.Run("get-blobs end point is not supported", func(t *testing.T) {
hi := mockSummary(t, []bool{true, true, true, true, true, false})
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
require.ErrorContains(t, "engine_getBlobsV1 is not supported", err)
require.NoError(t, err)
require.Equal(t, 0, len(verifiedBlobs))
})
@@ -2477,64 +2476,6 @@ func TestReconstructBlobSidecars(t *testing.T) {
})
}
func TestReconstructDataColumnSidecars(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
// Set up the right fork epochs
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 4
params.OverrideBeaconConfig(cfg)
client := &Service{capabilityCache: &capabilityCache{}}
b := util.NewBeaconBlockFulu()
b.Block.Slot = 4 * params.BeaconConfig().SlotsPerEpoch
kzgCommitments := createRandomKzgCommitments(t, 6)
b.Block.Body.BlobKzgCommitments = kzgCommitments
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
ctx := context.Background()
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
_, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
require.ErrorContains(t, "get blobs V2 for block", err)
})
t.Run("receiving all blobs", func(t *testing.T) {
blobMasks := []bool{true, true, true, true, true, true}
srv := createBlobServerV2(t, 6, blobMasks)
defer srv.Close()
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
require.NoError(t, err)
require.Equal(t, 128, len(dataColumns))
})
t.Run("missing some blobs", func(t *testing.T) {
blobMasks := []bool{false, true, true, true, true, true}
srv := createBlobServerV2(t, 6, blobMasks)
defer srv.Close()
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
require.ErrorContains(t, "unable to reconstruct data column sidecars, did not get all blobs from EL", err)
require.Equal(t, 0, len(dataColumns))
})
}
func createRandomKzgCommitments(t *testing.T, num int) [][]byte {
kzgCommitments := make([][]byte, num)
for i := range kzgCommitments {
@@ -2570,42 +2511,6 @@ func createBlobServer(t *testing.T, numBlobs int, callbackFuncs ...func()) *http
}))
}
func createBlobServerV2(t *testing.T, numBlobs int, blobMasks []bool) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
require.Equal(t, len(blobMasks), numBlobs)
blobAndCellProofs := make([]*pb.BlobAndProofV2Json, numBlobs)
for i := range blobAndCellProofs {
if !blobMasks[i] {
continue
}
blobAndCellProofs[i] = &pb.BlobAndProofV2Json{
Blob: []byte("0xblob"),
KzgProofs: []hexutil.Bytes{},
}
for j := 0; j < int(params.BeaconConfig().NumberOfColumns); j++ {
cellProof := make([]byte, 48)
blobAndCellProofs[i].KzgProofs = append(blobAndCellProofs[i].KzgProofs, cellProof)
}
}
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": blobAndCellProofs,
}
err := json.NewEncoder(w).Encode(respJSON)
require.NoError(t, err)
}))
}
func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
rpcClient, err := rpc.DialHTTP(url)
require.NoError(t, err)
@@ -2617,12 +2522,6 @@ func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Se
return rpcClient, client
}
func setupRpcClientV2(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
rpcClient, client := setupRpcClient(t, url, client)
client.capabilityCache = &capabilityCache{capabilities: map[string]interface{}{GetBlobsV2: nil}}
return rpcClient, client
}
func testNewBlobVerifier() verification.NewBlobVerifier {
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
return &verification.MockBlobVerifier{

View File

@@ -38,8 +38,6 @@ type EngineClient struct {
ErrGetPayload error
BlobSidecars []blocks.VerifiedROBlob
ErrorBlobSidecars error
DataColumnSidecars []blocks.VerifiedRODataColumn
ErrorDataColumnSidecars error
}
// NewPayload --
@@ -115,10 +113,6 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
return e.BlobSidecars, e.ErrorBlobSidecars
}
func (e *EngineClient) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error) {
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
}
// GetTerminalBlockHash --
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error) {
ttd := new(big.Int)

View File

@@ -156,6 +156,10 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*
if n.parent != nil {
parentRoot = n.parent.root
}
target := [32]byte{}
if n.target != nil {
target = n.target.root
}
thisNode := &forkchoice2.Node{
Slot: n.slot,
BlockRoot: n.root[:],
@@ -169,6 +173,7 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*
ExecutionOptimistic: n.optimistic,
ExecutionBlockHash: n.payloadHash[:],
Timestamp: n.timestamp,
Target: target[:],
}
if n.optimistic {
thisNode.Validity = forkchoice2.Optimistic

View File

@@ -1,8 +1,6 @@
package doublylinkedtree
import (
"fmt"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
)
@@ -15,7 +13,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
if !ok || previousNode == nil {
log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid prev root %#x", s.previousProposerBoostRoot))
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid prev root %#x", s.previousProposerBoostRoot)
} else {
previousNode.balance -= s.previousProposerBoostScore
}
@@ -24,7 +22,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
if !ok || currentNode == nil {
log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid current root %#x", s.proposerBoostRoot))
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid current root %#x", s.proposerBoostRoot)
} else {
proposerScore = (s.committeeWeight * params.BeaconConfig().ProposerScoreBoost) / 100
currentNode.balance += proposerScore

View File

@@ -252,6 +252,13 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
return roots, slots
}
func (f *ForkChoice) HighestReceivedBlockRoot() [32]byte {
if f.store.highestReceivedNode == nil {
return [32]byte{}
}
return f.store.highestReceivedNode.root
}
// HighestReceivedBlockSlot returns the highest slot received by the forkchoice
func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
if f.store.highestReceivedNode == nil {

View File

@@ -65,6 +65,7 @@ type FastGetter interface {
FinalizedPayloadBlockHash() [32]byte
HasNode([32]byte) bool
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockRoot() [32]byte
HighestReceivedBlockDelay() primitives.Slot
IsCanonical(root [32]byte) bool
IsOptimistic(root [32]byte) (bool, error)

View File

@@ -114,6 +114,13 @@ func (ro *ROForkChoice) HighestReceivedBlockSlot() primitives.Slot {
return ro.getter.HighestReceivedBlockSlot()
}
// HighestReceivedBlockRoot delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) HighestReceivedBlockRoot() [32]byte {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.HighestReceivedBlockRoot()
}
// HighestReceivedBlockDelay delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) HighestReceivedBlockDelay() primitives.Slot {
ro.l.RLock()

View File

@@ -29,6 +29,7 @@ const (
unrealizedJustifiedPayloadBlockHashCalled
nodeCountCalled
highestReceivedBlockSlotCalled
highestReceivedBlockRootCalled
highestReceivedBlockDelayCalled
receivedBlocksLastEpochCalled
weightCalled
@@ -252,6 +253,11 @@ func (ro *mockROForkchoice) HighestReceivedBlockSlot() primitives.Slot {
return 0
}
func (ro *mockROForkchoice) HighestReceivedBlockRoot() [32]byte {
ro.calls = append(ro.calls, highestReceivedBlockRootCalled)
return [32]byte{}
}
func (ro *mockROForkchoice) HighestReceivedBlockDelay() primitives.Slot {
ro.calls = append(ro.calls, highestReceivedBlockDelayCalled)
return 0

View File

@@ -22,7 +22,6 @@ go_library(
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/kv:go_default_library",

View File

@@ -166,7 +166,7 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
}
if !cliCtx.IsSet(flags.SuggestedFeeRecipient.Name) {
log.Warnf("In order to receive transaction fees from proposing blocks, " +
log.Warn("In order to receive transaction fees from proposing blocks, " +
"you must provide flag --" + flags.SuggestedFeeRecipient.Name + " with a valid ethereum address when starting your beacon node. " +
"Please see our documentation for more information on this requirement (https://docs.prylabs.network/docs/execution-node/fee-recipient).")
return nil

View File

@@ -26,7 +26,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/kv"
@@ -87,45 +86,43 @@ type serviceFlagOpts struct {
// full PoS node. It handles the lifecycle of the entire system and registers
// services to a service registry.
type BeaconNode struct {
cliCtx *cli.Context
ctx context.Context
cancel context.CancelFunc
services *runtime.ServiceRegistry
lock sync.RWMutex
stop chan struct{} // Channel to wait for termination notifications.
db db.Database
slasherDB db.SlasherDatabase
attestationCache *cache.AttestationCache
attestationPool attestations.Pool
exitPool voluntaryexits.PoolManager
slashingsPool slashings.PoolManager
syncCommitteePool synccommittee.Pool
blsToExecPool blstoexec.PoolManager
depositCache cache.DepositCache
trackedValidatorsCache *cache.TrackedValidatorsCache
payloadIDCache *cache.PayloadIDCache
stateFeed *event.Feed
blockFeed *event.Feed
opFeed *event.Feed
stateGen *stategen.State
collector *bcnodeCollector
slasherBlockHeadersFeed *event.Feed
slasherAttestationsFeed *event.Feed
finalizedStateAtStartUp state.BeaconState
serviceFlagOpts *serviceFlagOpts
GenesisInitializer genesis.Initializer
CheckpointInitializer checkpoint.Initializer
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
BackfillOpts []backfill.ServiceOption
initialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
BlobStorageOptions []filesystem.BlobStorageOption
DataColumnStorage *filesystem.DataColumnStorage
DataColumnStorageOptions []filesystem.DataColumnStorageOption
verifyInitWaiter *verification.InitializerWaiter
syncChecker *initialsync.SyncChecker
custodyInfo *peerdas.CustodyInfo
cliCtx *cli.Context
ctx context.Context
cancel context.CancelFunc
services *runtime.ServiceRegistry
lock sync.RWMutex
stop chan struct{} // Channel to wait for termination notifications.
db db.Database
slasherDB db.SlasherDatabase
attestationCache *cache.AttestationCache
attestationPool attestations.Pool
exitPool voluntaryexits.PoolManager
slashingsPool slashings.PoolManager
syncCommitteePool synccommittee.Pool
blsToExecPool blstoexec.PoolManager
depositCache cache.DepositCache
trackedValidatorsCache *cache.TrackedValidatorsCache
payloadIDCache *cache.PayloadIDCache
stateFeed *event.Feed
blockFeed *event.Feed
opFeed *event.Feed
stateGen *stategen.State
collector *bcnodeCollector
slasherBlockHeadersFeed *event.Feed
slasherAttestationsFeed *event.Feed
finalizedStateAtStartUp state.BeaconState
serviceFlagOpts *serviceFlagOpts
GenesisInitializer genesis.Initializer
CheckpointInitializer checkpoint.Initializer
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
BackfillOpts []backfill.ServiceOption
initialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
BlobStorageOptions []filesystem.BlobStorageOption
verifyInitWaiter *verification.InitializerWaiter
syncChecker *initialsync.SyncChecker
slasherEnabled bool
}
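The new slasherEnabled field replaces reads of the global features.Get().EnableSlasher: the flag is captured once in New and every registration site checks the field. A minimal sketch of the pattern with hypothetical names, assuming the urfave/cli flag API used elsewhere in the node.

package node

import "github.com/urfave/cli/v2"

// node captures the slasher CLI flag once at construction time instead
// of consulting a global feature flag at each call site.
type node struct {
	slasherEnabled bool
}

func newNode(cliCtx *cli.Context) *node {
	return &node{slasherEnabled: cliCtx.Bool("slasher")} // hypothetical flag name
}

func (n *node) registerSlasher() error {
	if !n.slasherEnabled {
		return nil // slasher disabled: nothing to register
	}
	// ... register the slasher service here ...
	return nil
}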
// New creates a new node instance, sets up configuration options, and registers
@@ -163,7 +160,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
serviceFlagOpts: &serviceFlagOpts{},
initialSyncComplete: make(chan struct{}),
syncChecker: &initialsync.SyncChecker{},
custodyInfo: &peerdas.CustodyInfo{},
slasherEnabled: cliCtx.Bool(flags.SlasherFlag.Name),
}
for _, opt := range opts {
@@ -191,15 +188,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.BlobStorage = blobs
}
if beacon.DataColumnStorage == nil {
dataColumnStorage, err := filesystem.NewDataColumnStorage(cliCtx.Context, beacon.DataColumnStorageOptions...)
if err != nil {
return nil, errors.Wrap(err, "new data column storage")
}
beacon.DataColumnStorage = dataColumnStorage
}
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
@@ -290,9 +278,7 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, errors.Wrap(err, "could not start DB")
}
beacon.BlobStorage.WarmCache()
beacon.DataColumnStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
@@ -358,7 +344,7 @@ func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *sta
return errors.Wrap(err, "could not register slashing pool service")
}
log.Debugln("Registering Slasher Service")
log.WithField("enabled", beacon.slasherEnabled).Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return errors.Wrap(err, "could not register slasher service")
}
@@ -504,10 +490,6 @@ func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath str
return nil, errors.Wrap(err, "could not clear blob storage")
}
if err := b.DataColumnStorage.Clear(); err != nil {
return nil, errors.Wrap(err, "could not clear data column storage")
}
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return nil, errors.Wrap(err, "could not create new database")
@@ -607,7 +589,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
}
func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
if !features.Get().EnableSlasher {
if !b.slasherEnabled {
return nil
}
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
@@ -714,7 +696,6 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
CustodyInfo: b.custodyInfo,
})
if err != nil {
return err
@@ -793,11 +774,10 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithClockSynchronizer(gs),
blockchain.WithSyncComplete(syncComplete),
blockchain.WithBlobStorage(b.BlobStorage),
blockchain.WithDataColumnStorage(b.DataColumnStorage),
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
blockchain.WithPayloadIDCache(b.payloadIDCache),
blockchain.WithSyncChecker(b.syncChecker),
blockchain.WithCustodyInfo(b.custodyInfo),
blockchain.WithSlasherEnabled(b.slasherEnabled),
)
blockchainService, err := blockchain.NewService(b.ctx, opts...)
@@ -880,11 +860,9 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithInitialSyncComplete(initialSyncComplete),
regularsync.WithStateNotifier(b),
regularsync.WithBlobStorage(b.BlobStorage),
regularsync.WithDataColumnStorage(b.DataColumnStorage),
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
regularsync.WithAvailableBlocker(bFillStore),
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
regularsync.WithCustodyInfo(b.custodyInfo),
regularsync.WithSlasherEnabled(b.slasherEnabled),
)
return b.services.RegisterService(rs)
}
@@ -908,14 +886,12 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
ClockWaiter: b.clockWaiter,
InitialSyncComplete: complete,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
CustodyInfo: b.custodyInfo,
}, opts...)
return b.services.RegisterService(is)
}
func (b *BeaconNode) registerSlasherService() error {
if !features.Get().EnableSlasher {
if !b.slasherEnabled {
return nil
}
var chainService *blockchain.Service
@@ -962,7 +938,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
}
var slasherService *slasher.Service
if features.Get().EnableSlasher {
if b.slasherEnabled {
if err := b.services.FetchService(&slasherService); err != nil {
return err
}
@@ -1004,7 +980,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
FinalizationFetcher: chainService,
BlockReceiver: chainService,
BlobReceiver: chainService,
DataColumnReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
@@ -1032,7 +1007,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
Router: router,
ClockWaiter: b.clockWaiter,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
TrackedValidatorsCache: b.trackedValidatorsCache,
PayloadIDCache: b.payloadIDCache,
})
@@ -1172,7 +1146,6 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
// TODO: Add backfill for data column storage
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
if err != nil {
return errors.Wrap(err, "error initializing backfill service")

View File

@@ -54,7 +54,7 @@ func TestNodeClose_OK(t *testing.T) {
cmd.ValidatorMonitorIndicesFlag.Value.SetInt(1)
ctx, cancel := newCliContextWithCancel(&app, set)
node, err := New(ctx, cancel, WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
node, err := New(ctx, cancel, WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
require.NoError(t, err)
node.Close()
@@ -75,7 +75,7 @@ func TestNodeStart_Ok(t *testing.T) {
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
WithBuilderFlagOptions([]builder.Option{}),
WithExecutionChainOptions([]execution.Option{}),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
require.NoError(t, err)
node.services = &runtime.ServiceRegistry{}
go func() {
@@ -99,7 +99,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
WithBuilderFlagOptions([]builder.Option{}),
WithExecutionChainOptions([]execution.Option{}),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
require.NoError(t, err)
go func() {
node.Start()
@@ -130,7 +130,7 @@ func TestClearDB(t *testing.T) {
context, cancel := newCliContextWithCancel(&app, set)
options := []Option{
WithExecutionChainOptions([]execution.Option{execution.WithHttpEndpoint(endpoint)}),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
}
_, err = New(context, cancel, options...)
require.NoError(t, err)

View File

@@ -50,20 +50,3 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
return nil
}
}
// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
return func(bn *BeaconNode) error {
bn.DataColumnStorage = bs
return nil
}
}
// WithDataColumnStorageOptions appends one or more filesystem.DataColumnStorageOption to the beacon node,
// to be used when initializing data column storage.
func WithDataColumnStorageOptions(opt ...filesystem.DataColumnStorageOption) Option {
return func(bn *BeaconNode) error {
bn.DataColumnStorageOptions = append(bn.DataColumnStorageOptions, opt...)
return nil
}
}
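Although the data-column options are deleted, the surviving WithBlobStorage and WithBlobStorageOptions keep the functional-options shape. A standalone sketch of that pattern, with a hypothetical Storage type:

package node

// Option mutates a BeaconNode during construction and may fail.
type Option func(*BeaconNode) error

// Storage is a hypothetical storage backend.
type Storage struct{}

type BeaconNode struct {
	storage *Storage
}

// WithStorage sets the storage backend, mirroring WithBlobStorage above.
func WithStorage(s *Storage) Option {
	return func(bn *BeaconNode) error {
		bn.storage = s
		return nil
	}
}

// New applies each option in order, aborting on the first error.
func New(opts ...Option) (*BeaconNode, error) {
	bn := &BeaconNode{}
	for _, opt := range opts {
		if err := opt(bn); err != nil {
			return nil, err
		}
	}
	return bn, nil
}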

View File

@@ -9,6 +9,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
log "github.com/sirupsen/logrus"
)
// SaveUnaggregatedAttestation saves an unaggregated attestation in cache.
@@ -60,7 +61,8 @@ func (c *AttCaches) UnaggregatedAttestations() ([]ethpb.Att, error) {
for _, att := range unAggregatedAtts {
seen, err := c.hasSeenBit(att)
if err != nil {
return nil, err
log.WithError(err).Debug("Could not check if attestation bits have been seen")
continue
}
if !seen {
atts = append(atts, att.Clone())
@@ -163,7 +165,13 @@ func (c *AttCaches) DeleteSeenUnaggregatedAttestations() (int, error) {
if att == nil || att.IsNil() || att.IsAggregated() {
continue
}
if seen, err := c.hasSeenBit(att); err == nil && seen {
seen, err := c.hasSeenBit(att)
if err != nil {
log.WithError(err).Debug("Could not check if attestation bits have been seen")
delete(c.unAggregatedAtt, r)
count++
continue
}
if seen {
delete(c.unAggregatedAtt, r)
count++
}

View File

@@ -7,7 +7,6 @@ go_library(
"broadcaster.go",
"config.go",
"connection_gater.go",
"custody.go",
"dial_relay_node.go",
"discovery.go",
"doc.go",
@@ -46,7 +45,6 @@ go_library(
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
@@ -57,7 +55,6 @@ go_library(
"//beacon-chain/startup:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
@@ -119,7 +116,6 @@ go_test(
"addr_factory_test.go",
"broadcaster_test.go",
"connection_gater_test.go",
"custody_test.go",
"dial_relay_node_test.go",
"discovery_test.go",
"fork_test.go",
@@ -141,11 +137,9 @@ go_test(
flaky = True,
tags = ["requires-network"],
deps = [
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
@@ -158,7 +152,6 @@ go_test(
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/leaky-bucket:go_default_library",
@@ -169,7 +162,6 @@ go_test(
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
"//proto/testing:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",

View File

@@ -11,7 +11,6 @@ import (
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
@@ -97,12 +96,7 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
return nil
}
func (s *Service) internalBroadcastAttestation(
ctx context.Context,
subnet uint64,
att ethpb.Att,
forkDigest [fieldparams.VersionLength]byte,
) {
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -158,7 +152,7 @@ func (s *Service) internalBroadcastAttestation(
}
}
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -234,12 +228,7 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
return nil
}
func (s *Service) internalBroadcastBlob(
ctx context.Context,
subnet uint64,
blobSidecar *ethpb.BlobSidecar,
forkDigest [fieldparams.VersionLength]byte,
) {
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -254,7 +243,7 @@ func (s *Service) internalBroadcastBlob(
s.subnetLocker(wrappedSubIdx).RUnlock()
if !hasPeer {
blobSidecarBroadcastAttempts.Inc()
blobSidecarCommitteeBroadcastAttempts.Inc()
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
@@ -263,7 +252,7 @@ func (s *Service) internalBroadcastBlob(
return err
}
if ok {
blobSidecarBroadcasts.Inc()
blobSidecarCommitteeBroadcasts.Inc()
return nil
}
return errors.New("failed to find peers for subnet")
@@ -279,120 +268,6 @@ func (s *Service) internalBroadcastBlob(
}
}
// BroadcastDataColumn broadcasts a data column to the p2p network. The message is assumed to be
// broadcast to the current fork and to the input column subnet.
// TODO: Add tests
func (s *Service) BroadcastDataColumn(
ctx context.Context,
root [fieldparams.RootLength]byte,
columnSubnet uint64,
dataColumnSidecar *ethpb.DataColumnSidecar,
) error {
// Add tracing to the function.
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastBlob")
defer span.End()
// Ensure the data column sidecar is not nil.
if dataColumnSidecar == nil {
return errors.Errorf("attempted to broadcast nil data column sidecar at subnet %d", columnSubnet)
}
// Retrieve the current fork digest.
forkDigest, err := s.currentForkDigest()
if err != nil {
err := errors.Wrap(err, "current fork digest")
tracing.AnnotateError(span, err)
return err
}
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
go s.internalBroadcastDataColumn(ctx, root, columnSubnet, dataColumnSidecar, forkDigest)
return nil
}
func (s *Service) internalBroadcastDataColumn(
ctx context.Context,
root [fieldparams.RootLength]byte,
columnSubnet uint64,
dataColumnSidecar *ethpb.DataColumnSidecar,
forkDigest [fieldparams.VersionLength]byte,
) {
// Add tracing to the function.
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
defer span.End()
// Increase the number of broadcast attempts.
dataColumnSidecarBroadcastAttempts.Inc()
// Clear parent context / deadline.
ctx = trace.NewContext(context.Background(), span)
// Define a one-slot length context timeout.
oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
ctx, cancel := context.WithTimeout(ctx, oneSlot)
defer cancel()
// Build the topic corresponding to this column subnet and this fork digest.
topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)
// Compute the wrapped subnet index.
wrappedSubIdx := columnSubnet + dataColumnSubnetVal
// Check if we have peers with this subnet.
hasPeer := func() bool {
s.subnetLocker(wrappedSubIdx).RLock()
defer s.subnetLocker(wrappedSubIdx).RUnlock()
return s.hasPeerWithSubnet(topic)
}()
// If no peers are found, attempt to find peers with this subnet.
if !hasPeer {
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
ok, err := s.FindPeersWithSubnet(ctx, topic, columnSubnet, 1 /*threshold*/)
if err != nil {
return errors.Wrap(err, "find peers for subnet")
}
if ok {
return nil
}
return errors.New("failed to find peers for subnet")
}(); err != nil {
log.WithError(err).Error("Failed to find peers")
tracing.AnnotateError(span, err)
}
}
// Broadcast the data column sidecar to the network.
if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
log.WithError(err).Error("Failed to broadcast data column sidecar")
tracing.AnnotateError(span, err)
}
header := dataColumnSidecar.SignedBlockHeader.GetHeader()
slot := header.GetSlot()
slotStartTime, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
if err != nil {
log.WithError(err).Error("Failed to convert slot to time")
}
log.WithFields(logrus.Fields{
"slot": slot,
"timeSinceSlotStart": time.Since(slotStartTime),
"root": fmt.Sprintf("%#x", root),
"columnSubnet": columnSubnet,
}).Debug("Broadcasted data column sidecar")
// Increase the number of successful broadcasts.
dataColumnSidecarBroadcasts.Inc()
}
// broadcastObject broadcasts a message to other peers in our gossip mesh.
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
@@ -422,18 +297,14 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
return nil
}
func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
}
func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}
func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}
func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
}
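Each *ToTopic helper formats a fork digest and a subnet index into a gossip topic string. A tiny self-contained sketch; the format string is an illustrative stand-in for the AttestationSubnetTopicFormat constant, not the canonical definition.

package p2p

import "fmt"

// attestationTopic mirrors attestationToTopic above. The format string
// is illustrative, not the canonical constant.
func attestationTopic(forkDigest [4]byte, subnet uint64) string {
	return fmt.Sprintf("/eth2/%x/beacon_attestation_%d", forkDigest, subnet)
}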

View File

@@ -12,16 +12,11 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/host"
"github.com/prysmaticlabs/go-bitfield"
"google.golang.org/protobuf/proto"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -29,6 +24,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"google.golang.org/protobuf/proto"
)
func TestService_Broadcast(t *testing.T) {
@@ -520,88 +516,3 @@ func TestService_BroadcastBlob(t *testing.T) {
require.NoError(t, p.BroadcastBlob(ctx, subnet, blobSidecar))
require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}
func TestService_BroadcastDataColumn(t *testing.T) {
require.NoError(t, kzg.Start())
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
require.NotEqual(t, 0, len(p1.BHost.Network().Peers()), "No peers")
p := &Service{
host: p1.BHost,
pubsub: p1.PubSub(),
joinedTopics: map[string]*pubsub.Topic{},
cfg: &Config{},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex),
subnetsLockLock: sync.Mutex{},
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
}),
}
var (
comts [][]byte
blobs []kzg.Blob
)
for i := int64(0); i < 6; i++ {
blob := kzg.Blob(util.GetRandBlob(i))
commitment, err := kzg.BlobToKZGCommitment(&blob)
require.NoError(t, err)
comts = append(comts, commitment[:])
blobs = append(blobs, blob)
}
b := util.NewBeaconBlockFulu()
b.Block.Body.BlobKzgCommitments = comts
sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
for i := range blobs {
blobs[i] = kzg.Blob(util.GetRandBlob(int64(i)))
}
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
sidecars, err := peerdas.DataColumnSidecars(sb, cellsAndProofs)
require.NoError(t, err)
sidecar := sidecars[0]
subnet := uint64(0)
topic := DataColumnSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(sidecar)] = topic
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest, subnet)
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
wg.Add(1)
go func(tt *testing.T) {
defer wg.Done()
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
msg, err := sub.Next(ctx)
require.NoError(t, err)
result := &ethpb.DataColumnSidecar{}
require.NoError(t, p.Encoding().DecodeGossip(msg.Data, result))
require.DeepEqual(t, result, sidecar)
}(t)
// Attempt to broadcast nil object should fail.
ctx := context.Background()
var root [fieldparams.RootLength]byte
require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastDataColumn(ctx, root, subnet, nil))
// Broadcast to peers and wait.
require.NoError(t, p.BroadcastDataColumn(ctx, root, subnet, sidecar))
require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}

View File

@@ -4,7 +4,6 @@ import (
"time"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
)
@@ -39,7 +38,6 @@ type Config struct {
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabase
ClockWaiter startup.ClockWaiter
CustodyInfo *peerdas.CustodyInfo
}
// validateConfig validates whether the values provided are accurate and will set

Some files were not shown because too many files have changed in this diff.