mirror of https://github.com/OffchainLabs/prysm.git
synced 2026-01-10 05:47:59 -05:00

Compare commits: peerdas-de...v5.3.1-rc. (35 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 863eee7b40 | |
| | 6d89373583 | |
| | 9a421a2feb | |
| | 4e41d5c610 | |
| | 0b6bea43a8 | |
| | f89afb0fbd | |
| | 3cd2973c92 | |
| | d3e5710a63 | |
| | f40b4f16c2 | |
| | 7fd4f746d6 | |
| | 2362d9f3c2 | |
| | 6b84f8c6b1 | |
| | 997a9112d1 | |
| | d46ca97680 | |
| | 417bbf8a9e | |
| | a7b016c954 | |
| | 6015493de9 | |
| | c718bdbe2b | |
| | 0a8f947169 | |
| | d7efccf6a5 | |
| | 334920bc9e | |
| | 6e00db433c | |
| | c6344e7c3e | |
| | 2131254722 | |
| | b6d1866deb | |
| | e56f489d06 | |
| | bf62afb27c | |
| | 8369056027 | |
| | 09499a732f | |
| | 2ee015452c | |
| | ffc1bf8bbe | |
| | 014dbd5c3a | |
| | 9bceaa59d2 | |
| | 832ebb3f39 | |
| | 8345c271cc | |
2  .github/actions/gomodtidy/Dockerfile  (vendored)
@@ -1,4 +1,4 @@
-FROM golang:1.23-alpine
+FROM golang:1.24-alpine

 COPY entrypoint.sh /entrypoint.sh
16  .github/workflows/go.yml  (vendored)
@@ -28,15 +28,15 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@v4
-      - name: Set up Go 1.23
+      - name: Set up Go 1.24
         uses: actions/setup-go@v4
         with:
-          go-version: '1.23.5'
+          go-version: '1.24.0'
       - name: Run Gosec Security Scanner
         run: | # https://github.com/securego/gosec/issues/469
           export PATH=$PATH:$(go env GOPATH)/bin
-          go install github.com/securego/gosec/v2/cmd/gosec@v2.19.0
-          gosec -exclude-generated -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
+          go install github.com/securego/gosec/v2/cmd/gosec@v2.22.1
+          gosec -exclude-generated -exclude=G307,G115 -exclude-dir=crypto/bls/herumi ./...

   lint:
     name: Lint
@@ -45,16 +45,16 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4

-      - name: Set up Go 1.23
+      - name: Set up Go 1.24
         uses: actions/setup-go@v4
         with:
-          go-version: '1.23.5'
+          go-version: '1.24.0'
         id: go

       - name: Golangci-lint
         uses: golangci/golangci-lint-action@v5
         with:
-          version: v1.63.4
+          version: v1.64.5
           args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

   build:
@@ -64,7 +64,7 @@ jobs:
       - name: Set up Go 1.x
         uses: actions/setup-go@v4
         with:
-          go-version: '1.23.5'
+          go-version: '1.24.0'
         id: go

       - name: Check out code into the Go module directory
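Note on the gosec bump: G115 is the rule in newer gosec releases that flags integer conversions which can silently overflow, so excluding it repo-wide here mirrors the existing G307 exclusion. The sketch below is illustrative only (names are made up, not from the repo) and shows the pattern G115 reports plus the explicitly guarded alternative.

package main

import (
	"fmt"
	"math"
)

// toIntUnchecked is the shape of code gosec G115 flags: a narrowing
// conversion where a large uint64 would silently wrap.
func toIntUnchecked(x uint64) int {
	return int(x) // G115: potential integer overflow on conversion
}

// toInt guards the conversion explicitly, which is the usual fix when
// the rule is enabled (or documents why it is safe when it is excluded).
func toInt(x uint64) (int, error) {
	if x > math.MaxInt {
		return 0, fmt.Errorf("value %d overflows int", x)
	}
	return int(x), nil
}

func main() {
	v, err := toInt(42)
	fmt.Println(v, err)
}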
@@ -75,6 +75,7 @@ linters:
     - tagliatelle
     - thelper
     - unparam
+    - usetesting
     - varnamelen
     - wrapcheck
     - wsl
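Note on the newly enabled usetesting linter: it reports test code that calls os-level helpers where the testing package provides a managed equivalent. A small illustrative sketch (not taken from the repo):

package demo_test

import (
	"os"
	"testing"
)

func TestTempDir(t *testing.T) {
	// usetesting would flag this: prefer t.TempDir(), which is cleaned up
	// automatically when the test (and its subtests) finish.
	dir, err := os.MkdirTemp("", "demo")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	better := t.TempDir() // managed by the testing framework
	_ = better
}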
@@ -165,7 +165,7 @@ STATICCHECK_ANALYZERS = [
     "sa6006",
     "sa9001",
     "sa9002",
-    #"sa9003", # Doesn't build. See https://github.com/dominikh/go-tools/pull/1483
+    "sa9003",
     "sa9004",
     "sa9005",
     "sa9006",
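Note on re-enabling sa9003: staticcheck's SA9003 reports empty if/else branches, which usually indicate a dropped error path. Illustrative sketch only:

package demo

import "log"

func process(err error) {
	if err != nil {
	} // SA9003: empty branch body — the error is silently ignored

	// The usual fix is to handle the error or delete the branch entirely.
	if err != nil {
		log.Printf("processing failed: %v", err)
	}
}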
10  WORKSPACE
@@ -160,15 +160,15 @@ oci_register_toolchains(

 http_archive(
     name = "io_bazel_rules_go",
+    integrity = "sha256-JD8o94crTb2DFiJJR8nMAGdBAW95zIENB4cbI+JnrI4=",
     patch_args = ["-p1"],
     patches = [
         # Expose internals of go_test for custom build transitions.
         "//third_party:io_bazel_rules_go_test.patch",
     ],
-    sha256 = "b2038e2de2cace18f032249cb4bb0048abf583a36369fa98f687af1b3f880b26",
+    strip_prefix = "rules_go-cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9",
     urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.48.1/rules_go-v0.48.1.zip",
-        "https://github.com/bazelbuild/rules_go/releases/download/v0.48.1/rules_go-v0.48.1.zip",
+        "https://github.com/bazel-contrib/rules_go/archive/cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9.tar.gz",
     ],
 )

@@ -210,7 +210,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
 go_rules_dependencies()

 go_register_toolchains(
-    go_version = "1.23.5",
+    go_version = "1.24.0",
     nogo = "@//:nogo",
 )

@@ -431,7 +431,7 @@ gometalinter_dependencies()

 load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")

-gazelle_dependencies()
+gazelle_dependencies(go_sdk = "go_sdk")

 load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
@@ -33,6 +33,7 @@ go_library(
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_prysmaticlabs_fastssz//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
+        "@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
         "@org_golang_google_protobuf//proto:go_default_library",
     ],
 )
@@ -25,6 +25,7 @@ import (
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
 	log "github.com/sirupsen/logrus"
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
 )

 const (
@@ -108,7 +109,7 @@ func NewClient(host string, opts ...ClientOpt) (*Client, error) {
 		return nil, err
 	}
 	c := &Client{
-		hc:      &http.Client{},
+		hc:      &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)},
 		baseURL: u,
 	}
 	for _, o := range opts {
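Note on the otelhttp change: wrapping http.DefaultTransport instruments every outgoing request from this client with an OpenTelemetry client span and propagates trace-context headers. A self-contained sketch of the same pattern (the URL is a placeholder, not taken from this diff):

package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Same construction as the patched NewClient: an ordinary http.Client
	// whose RoundTripper is the otelhttp-instrumented default transport.
	hc := &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}

	// Hypothetical local beacon-API endpoint, used only to show a traced call.
	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet,
		"http://localhost:3500/eth/v1/node/version", nil)
	if err != nil {
		panic(err)
	}
	resp, err := hc.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}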
@@ -4,9 +4,11 @@ go_library(
     name = "go_default_library",
     srcs = [
         "block.go",
+        "block_execution.go",
         "conversions.go",
         "conversions_blob.go",
         "conversions_block.go",
+        "conversions_block_execution.go",
         "conversions_lightclient.go",
         "conversions_state.go",
         "endpoints_beacon.go",
@@ -47,10 +49,16 @@ go_library(

 go_test(
     name = "go_default_test",
-    srcs = ["conversions_test.go"],
+    srcs = [
+        "conversions_block_execution_test.go",
+        "conversions_test.go",
+    ],
     embed = [":go_default_library"],
     deps = [
         "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//testing/require:go_default_library",
         "@com_github_ethereum_go_ethereum//common:go_default_library",
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
     ],
 )
@@ -186,40 +186,6 @@ type BlindedBeaconBlockBodyBellatrix struct {
 	ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
 }

-type ExecutionPayload struct {
-	ParentHash string `json:"parent_hash"`
-	FeeRecipient string `json:"fee_recipient"`
-	StateRoot string `json:"state_root"`
-	ReceiptsRoot string `json:"receipts_root"`
-	LogsBloom string `json:"logs_bloom"`
-	PrevRandao string `json:"prev_randao"`
-	BlockNumber string `json:"block_number"`
-	GasLimit string `json:"gas_limit"`
-	GasUsed string `json:"gas_used"`
-	Timestamp string `json:"timestamp"`
-	ExtraData string `json:"extra_data"`
-	BaseFeePerGas string `json:"base_fee_per_gas"`
-	BlockHash string `json:"block_hash"`
-	Transactions []string `json:"transactions"`
-}
-
-type ExecutionPayloadHeader struct {
-	ParentHash string `json:"parent_hash"`
-	FeeRecipient string `json:"fee_recipient"`
-	StateRoot string `json:"state_root"`
-	ReceiptsRoot string `json:"receipts_root"`
-	LogsBloom string `json:"logs_bloom"`
-	PrevRandao string `json:"prev_randao"`
-	BlockNumber string `json:"block_number"`
-	GasLimit string `json:"gas_limit"`
-	GasUsed string `json:"gas_used"`
-	Timestamp string `json:"timestamp"`
-	ExtraData string `json:"extra_data"`
-	BaseFeePerGas string `json:"base_fee_per_gas"`
-	BlockHash string `json:"block_hash"`
-	TransactionsRoot string `json:"transactions_root"`
-}
-
 // ----------------------------------------------------------------------------
 // Capella
 // ----------------------------------------------------------------------------
@@ -298,42 +264,6 @@ type BlindedBeaconBlockBodyCapella struct {
 	BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
 }

-type ExecutionPayloadCapella struct {
-	ParentHash string `json:"parent_hash"`
-	FeeRecipient string `json:"fee_recipient"`
-	StateRoot string `json:"state_root"`
-	ReceiptsRoot string `json:"receipts_root"`
-	LogsBloom string `json:"logs_bloom"`
-	PrevRandao string `json:"prev_randao"`
-	BlockNumber string `json:"block_number"`
-	GasLimit string `json:"gas_limit"`
-	GasUsed string `json:"gas_used"`
-	Timestamp string `json:"timestamp"`
-	ExtraData string `json:"extra_data"`
-	BaseFeePerGas string `json:"base_fee_per_gas"`
-	BlockHash string `json:"block_hash"`
-	Transactions []string `json:"transactions"`
-	Withdrawals []*Withdrawal `json:"withdrawals"`
-}
-
-type ExecutionPayloadHeaderCapella struct {
-	ParentHash string `json:"parent_hash"`
-	FeeRecipient string `json:"fee_recipient"`
-	StateRoot string `json:"state_root"`
-	ReceiptsRoot string `json:"receipts_root"`
-	LogsBloom string `json:"logs_bloom"`
-	PrevRandao string `json:"prev_randao"`
-	BlockNumber string `json:"block_number"`
-	GasLimit string `json:"gas_limit"`
-	GasUsed string `json:"gas_used"`
-	Timestamp string `json:"timestamp"`
-	ExtraData string `json:"extra_data"`
-	BaseFeePerGas string `json:"base_fee_per_gas"`
-	BlockHash string `json:"block_hash"`
-	TransactionsRoot string `json:"transactions_root"`
-	WithdrawalsRoot string `json:"withdrawals_root"`
-}
-
 // ----------------------------------------------------------------------------
 // Deneb
 // ----------------------------------------------------------------------------
@@ -426,46 +356,6 @@ type BlindedBeaconBlockBodyDeneb struct {
 	BlobKzgCommitments []string `json:"blob_kzg_commitments"`
 }

-type ExecutionPayloadDeneb struct {
-	ParentHash string `json:"parent_hash"`
-	FeeRecipient string `json:"fee_recipient"`
-	StateRoot string `json:"state_root"`
-	ReceiptsRoot string `json:"receipts_root"`
-	LogsBloom string `json:"logs_bloom"`
-	PrevRandao string `json:"prev_randao"`
-	BlockNumber string `json:"block_number"`
-	GasLimit string `json:"gas_limit"`
-	GasUsed string `json:"gas_used"`
-	Timestamp string `json:"timestamp"`
-	ExtraData string `json:"extra_data"`
-	BaseFeePerGas string `json:"base_fee_per_gas"`
-	BlockHash string `json:"block_hash"`
-	Transactions []string `json:"transactions"`
-	Withdrawals []*Withdrawal `json:"withdrawals"`
-	BlobGasUsed string `json:"blob_gas_used"`
-	ExcessBlobGas string `json:"excess_blob_gas"`
-}
-
-type ExecutionPayloadHeaderDeneb struct {
-	ParentHash string `json:"parent_hash"`
-	FeeRecipient string `json:"fee_recipient"`
-	StateRoot string `json:"state_root"`
-	ReceiptsRoot string `json:"receipts_root"`
-	LogsBloom string `json:"logs_bloom"`
-	PrevRandao string `json:"prev_randao"`
-	BlockNumber string `json:"block_number"`
-	GasLimit string `json:"gas_limit"`
-	GasUsed string `json:"gas_used"`
-	Timestamp string `json:"timestamp"`
-	ExtraData string `json:"extra_data"`
-	BaseFeePerGas string `json:"base_fee_per_gas"`
-	BlockHash string `json:"block_hash"`
-	TransactionsRoot string `json:"transactions_root"`
-	WithdrawalsRoot string `json:"withdrawals_root"`
-	BlobGasUsed string `json:"blob_gas_used"`
-	ExcessBlobGas string `json:"excess_blob_gas"`
-}
-
 // ----------------------------------------------------------------------------
 // Electra
 // ----------------------------------------------------------------------------
@@ -560,14 +450,6 @@ type BlindedBeaconBlockBodyElectra struct {
 	ExecutionRequests *ExecutionRequests `json:"execution_requests"`
 }

-type (
-	ExecutionRequests struct {
-		Deposits []*DepositRequest `json:"deposits"`
-		Withdrawals []*WithdrawalRequest `json:"withdrawals"`
-		Consolidations []*ConsolidationRequest `json:"consolidations"`
-	}
-)
-
 // ----------------------------------------------------------------------------
 // Fulu
 // ----------------------------------------------------------------------------
157  api/server/structs/block_execution.go  (new file)
@@ -0,0 +1,157 @@
package structs

// ----------------------------------------------------------------------------
// Bellatrix
// ----------------------------------------------------------------------------

type ExecutionPayload struct {
	ParentHash string `json:"parent_hash"`
	FeeRecipient string `json:"fee_recipient"`
	StateRoot string `json:"state_root"`
	ReceiptsRoot string `json:"receipts_root"`
	LogsBloom string `json:"logs_bloom"`
	PrevRandao string `json:"prev_randao"`
	BlockNumber string `json:"block_number"`
	GasLimit string `json:"gas_limit"`
	GasUsed string `json:"gas_used"`
	Timestamp string `json:"timestamp"`
	ExtraData string `json:"extra_data"`
	BaseFeePerGas string `json:"base_fee_per_gas"`
	BlockHash string `json:"block_hash"`
	Transactions []string `json:"transactions"`
}

type ExecutionPayloadHeader struct {
	ParentHash string `json:"parent_hash"`
	FeeRecipient string `json:"fee_recipient"`
	StateRoot string `json:"state_root"`
	ReceiptsRoot string `json:"receipts_root"`
	LogsBloom string `json:"logs_bloom"`
	PrevRandao string `json:"prev_randao"`
	BlockNumber string `json:"block_number"`
	GasLimit string `json:"gas_limit"`
	GasUsed string `json:"gas_used"`
	Timestamp string `json:"timestamp"`
	ExtraData string `json:"extra_data"`
	BaseFeePerGas string `json:"base_fee_per_gas"`
	BlockHash string `json:"block_hash"`
	TransactionsRoot string `json:"transactions_root"`
}

// ----------------------------------------------------------------------------
// Capella
// ----------------------------------------------------------------------------

type ExecutionPayloadCapella struct {
	ParentHash string `json:"parent_hash"`
	FeeRecipient string `json:"fee_recipient"`
	StateRoot string `json:"state_root"`
	ReceiptsRoot string `json:"receipts_root"`
	LogsBloom string `json:"logs_bloom"`
	PrevRandao string `json:"prev_randao"`
	BlockNumber string `json:"block_number"`
	GasLimit string `json:"gas_limit"`
	GasUsed string `json:"gas_used"`
	Timestamp string `json:"timestamp"`
	ExtraData string `json:"extra_data"`
	BaseFeePerGas string `json:"base_fee_per_gas"`
	BlockHash string `json:"block_hash"`
	Transactions []string `json:"transactions"`
	Withdrawals []*Withdrawal `json:"withdrawals"`
}

type ExecutionPayloadHeaderCapella struct {
	ParentHash string `json:"parent_hash"`
	FeeRecipient string `json:"fee_recipient"`
	StateRoot string `json:"state_root"`
	ReceiptsRoot string `json:"receipts_root"`
	LogsBloom string `json:"logs_bloom"`
	PrevRandao string `json:"prev_randao"`
	BlockNumber string `json:"block_number"`
	GasLimit string `json:"gas_limit"`
	GasUsed string `json:"gas_used"`
	Timestamp string `json:"timestamp"`
	ExtraData string `json:"extra_data"`
	BaseFeePerGas string `json:"base_fee_per_gas"`
	BlockHash string `json:"block_hash"`
	TransactionsRoot string `json:"transactions_root"`
	WithdrawalsRoot string `json:"withdrawals_root"`
}

// ----------------------------------------------------------------------------
// Deneb
// ----------------------------------------------------------------------------

type ExecutionPayloadDeneb struct {
	ParentHash string `json:"parent_hash"`
	FeeRecipient string `json:"fee_recipient"`
	StateRoot string `json:"state_root"`
	ReceiptsRoot string `json:"receipts_root"`
	LogsBloom string `json:"logs_bloom"`
	PrevRandao string `json:"prev_randao"`
	BlockNumber string `json:"block_number"`
	GasLimit string `json:"gas_limit"`
	GasUsed string `json:"gas_used"`
	Timestamp string `json:"timestamp"`
	ExtraData string `json:"extra_data"`
	BaseFeePerGas string `json:"base_fee_per_gas"`
	BlockHash string `json:"block_hash"`
	Transactions []string `json:"transactions"`
	Withdrawals []*Withdrawal `json:"withdrawals"`
	BlobGasUsed string `json:"blob_gas_used"`
	ExcessBlobGas string `json:"excess_blob_gas"`
}

type ExecutionPayloadHeaderDeneb struct {
	ParentHash string `json:"parent_hash"`
	FeeRecipient string `json:"fee_recipient"`
	StateRoot string `json:"state_root"`
	ReceiptsRoot string `json:"receipts_root"`
	LogsBloom string `json:"logs_bloom"`
	PrevRandao string `json:"prev_randao"`
	BlockNumber string `json:"block_number"`
	GasLimit string `json:"gas_limit"`
	GasUsed string `json:"gas_used"`
	Timestamp string `json:"timestamp"`
	ExtraData string `json:"extra_data"`
	BaseFeePerGas string `json:"base_fee_per_gas"`
	BlockHash string `json:"block_hash"`
	TransactionsRoot string `json:"transactions_root"`
	WithdrawalsRoot string `json:"withdrawals_root"`
	BlobGasUsed string `json:"blob_gas_used"`
	ExcessBlobGas string `json:"excess_blob_gas"`
}

// ----------------------------------------------------------------------------
// Electra
// ----------------------------------------------------------------------------

type ExecutionRequests struct {
	Deposits []*DepositRequest `json:"deposits"`
	Withdrawals []*WithdrawalRequest `json:"withdrawals"`
	Consolidations []*ConsolidationRequest `json:"consolidations"`
}

type DepositRequest struct {
	Pubkey string `json:"pubkey"`
	WithdrawalCredentials string `json:"withdrawal_credentials"`
	Amount string `json:"amount"`
	Signature string `json:"signature"`
	Index string `json:"index"`
}

type WithdrawalRequest struct {
	SourceAddress string `json:"source_address"`
	ValidatorPubkey string `json:"validator_pubkey"`
	Amount string `json:"amount"`
}

type ConsolidationRequest struct {
	SourceAddress string `json:"source_address"`
	SourcePubkey string `json:"source_pubkey"`
	TargetPubkey string `json:"target_pubkey"`
}

// ----------------------------------------------------------------------------
// Fulu
// ----------------------------------------------------------------------------
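For reference, the json tags above drive the REST encoding: numeric fields are decimal strings and keys are snake_case. A standalone sketch using a local copy of DepositRequest (stdlib only, not the package's own test code; the values are made up):

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of the DepositRequest type defined above, so the sketch
// compiles on its own.
type DepositRequest struct {
	Pubkey                string `json:"pubkey"`
	WithdrawalCredentials string `json:"withdrawal_credentials"`
	Amount                string `json:"amount"`
	Signature             string `json:"signature"`
	Index                 string `json:"index"`
}

func main() {
	d := DepositRequest{
		Pubkey:                "0xabc...", // truncated example values
		WithdrawalCredentials: "0x0100...",
		Amount:                "32000000000",
		Signature:             "0xdef...",
		Index:                 "0",
	}
	b, _ := json.MarshalIndent(d, "", "  ")
	// Prints snake_case keys, e.g. "withdrawal_credentials", with string values.
	fmt.Println(string(b))
}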
@@ -889,126 +889,6 @@ func WithdrawalFromConsensus(w *enginev1.Withdrawal) *Withdrawal {
 	}
 }

-func WithdrawalRequestsFromConsensus(ws []*enginev1.WithdrawalRequest) []*WithdrawalRequest {
-	result := make([]*WithdrawalRequest, len(ws))
-	for i, w := range ws {
-		result[i] = WithdrawalRequestFromConsensus(w)
-	}
-	return result
-}
-
-func WithdrawalRequestFromConsensus(w *enginev1.WithdrawalRequest) *WithdrawalRequest {
-	return &WithdrawalRequest{
-		SourceAddress: hexutil.Encode(w.SourceAddress),
-		ValidatorPubkey: hexutil.Encode(w.ValidatorPubkey),
-		Amount: fmt.Sprintf("%d", w.Amount),
-	}
-}
-
-func (w *WithdrawalRequest) ToConsensus() (*enginev1.WithdrawalRequest, error) {
-	src, err := bytesutil.DecodeHexWithLength(w.SourceAddress, common.AddressLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "SourceAddress")
-	}
-	pubkey, err := bytesutil.DecodeHexWithLength(w.ValidatorPubkey, fieldparams.BLSPubkeyLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "ValidatorPubkey")
-	}
-	amount, err := strconv.ParseUint(w.Amount, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Amount")
-	}
-	return &enginev1.WithdrawalRequest{
-		SourceAddress: src,
-		ValidatorPubkey: pubkey,
-		Amount: amount,
-	}, nil
-}
-
-func ConsolidationRequestsFromConsensus(cs []*enginev1.ConsolidationRequest) []*ConsolidationRequest {
-	result := make([]*ConsolidationRequest, len(cs))
-	for i, c := range cs {
-		result[i] = ConsolidationRequestFromConsensus(c)
-	}
-	return result
-}
-
-func ConsolidationRequestFromConsensus(c *enginev1.ConsolidationRequest) *ConsolidationRequest {
-	return &ConsolidationRequest{
-		SourceAddress: hexutil.Encode(c.SourceAddress),
-		SourcePubkey: hexutil.Encode(c.SourcePubkey),
-		TargetPubkey: hexutil.Encode(c.TargetPubkey),
-	}
-}
-
-func (c *ConsolidationRequest) ToConsensus() (*enginev1.ConsolidationRequest, error) {
-	srcAddress, err := bytesutil.DecodeHexWithLength(c.SourceAddress, common.AddressLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "SourceAddress")
-	}
-	srcPubkey, err := bytesutil.DecodeHexWithLength(c.SourcePubkey, fieldparams.BLSPubkeyLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "SourcePubkey")
-	}
-	targetPubkey, err := bytesutil.DecodeHexWithLength(c.TargetPubkey, fieldparams.BLSPubkeyLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "TargetPubkey")
-	}
-	return &enginev1.ConsolidationRequest{
-		SourceAddress: srcAddress,
-		SourcePubkey: srcPubkey,
-		TargetPubkey: targetPubkey,
-	}, nil
-}
-
-func DepositRequestsFromConsensus(ds []*enginev1.DepositRequest) []*DepositRequest {
-	result := make([]*DepositRequest, len(ds))
-	for i, d := range ds {
-		result[i] = DepositRequestFromConsensus(d)
-	}
-	return result
-}
-
-func DepositRequestFromConsensus(d *enginev1.DepositRequest) *DepositRequest {
-	return &DepositRequest{
-		Pubkey: hexutil.Encode(d.Pubkey),
-		WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
-		Amount: fmt.Sprintf("%d", d.Amount),
-		Signature: hexutil.Encode(d.Signature),
-		Index: fmt.Sprintf("%d", d.Index),
-	}
-}
-
-func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
-	pubkey, err := bytesutil.DecodeHexWithLength(d.Pubkey, fieldparams.BLSPubkeyLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Pubkey")
-	}
-	withdrawalCredentials, err := bytesutil.DecodeHexWithLength(d.WithdrawalCredentials, fieldparams.RootLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "WithdrawalCredentials")
-	}
-	amount, err := strconv.ParseUint(d.Amount, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Amount")
-	}
-	sig, err := bytesutil.DecodeHexWithLength(d.Signature, fieldparams.BLSSignatureLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Signature")
-	}
-	index, err := strconv.ParseUint(d.Index, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Index")
-	}
-	return &enginev1.DepositRequest{
-		Pubkey: pubkey,
-		WithdrawalCredentials: withdrawalCredentials,
-		Amount: amount,
-		Signature: sig,
-		Index: index,
-	}, nil
-}
-
 func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlashing, error) {
 	if src == nil {
 		return nil, server.NewDecodeError(errNilValue, "ProposerSlashings")
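The helpers removed here are the same ones re-added in conversions_block_execution.go, so behaviour should be unchanged. A hedged sketch of a round-trip check against the relocated helpers, assuming the usual prysm testing/require assertions (field values are made up; lengths match the 20-byte address and 48-byte BLS pubkey decode checks shown above):

package structs_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/api/server/structs"
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestWithdrawalRequestRoundTrip(t *testing.T) {
	// Consensus representation -> API struct -> consensus representation.
	src := &enginev1.WithdrawalRequest{
		SourceAddress:   make([]byte, 20),
		ValidatorPubkey: make([]byte, 48),
		Amount:          32,
	}
	apiReq := structs.WithdrawalRequestFromConsensus(src)
	back, err := apiReq.ToConsensus()
	require.NoError(t, err)
	require.DeepEqual(t, src, back)
}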
File diff suppressed because it is too large
973  api/server/structs/conversions_block_execution.go  (new file)
@@ -0,0 +1,973 @@
|
||||
package structs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
)
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Bellatrix
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
func ExecutionPayloadFromConsensus(payload *enginev1.ExecutionPayload) (*ExecutionPayload, error) {
|
||||
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactions := make([]string, len(payload.Transactions))
|
||||
for i, tx := range payload.Transactions {
|
||||
transactions[i] = hexutil.Encode(tx)
|
||||
}
|
||||
|
||||
return &ExecutionPayload{
|
||||
ParentHash: hexutil.Encode(payload.ParentHash),
|
||||
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
|
||||
StateRoot: hexutil.Encode(payload.StateRoot),
|
||||
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
|
||||
LogsBloom: hexutil.Encode(payload.LogsBloom),
|
||||
PrevRandao: hexutil.Encode(payload.PrevRandao),
|
||||
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
|
||||
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
|
||||
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
|
||||
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
|
||||
ExtraData: hexutil.Encode(payload.ExtraData),
|
||||
BaseFeePerGas: baseFeePerGas,
|
||||
BlockHash: hexutil.Encode(payload.BlockHash),
|
||||
Transactions: transactions,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e *ExecutionPayload) ToConsensus() (*enginev1.ExecutionPayload, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionPayload")
|
||||
}
|
||||
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ParentHash")
|
||||
}
|
||||
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.FeeRecipient")
|
||||
}
|
||||
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.StateRoot")
|
||||
}
|
||||
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ReceiptsRoot")
|
||||
}
|
||||
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.LogsBloom")
|
||||
}
|
||||
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.PrevRandao")
|
||||
}
|
||||
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockNumber")
|
||||
}
|
||||
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.GasLimit")
|
||||
}
|
||||
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.GasUsed")
|
||||
}
|
||||
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Timestamp")
|
||||
}
|
||||
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ExtraData")
|
||||
}
|
||||
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BaseFeePerGas")
|
||||
}
|
||||
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockHash")
|
||||
}
|
||||
err = slice.VerifyMaxLength(e.Transactions, fieldparams.MaxTxsPerPayloadLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Transactions")
|
||||
}
|
||||
payloadTxs := make([][]byte, len(e.Transactions))
|
||||
for i, tx := range e.Transactions {
|
||||
payloadTxs[i], err = bytesutil.DecodeHexWithMaxLength(tx, fieldparams.MaxBytesPerTxLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Transactions[%d]", i))
|
||||
}
|
||||
}
|
||||
return &enginev1.ExecutionPayload{
|
||||
ParentHash: payloadParentHash,
|
||||
FeeRecipient: payloadFeeRecipient,
|
||||
StateRoot: payloadStateRoot,
|
||||
ReceiptsRoot: payloadReceiptsRoot,
|
||||
LogsBloom: payloadLogsBloom,
|
||||
PrevRandao: payloadPrevRandao,
|
||||
BlockNumber: payloadBlockNumber,
|
||||
GasLimit: payloadGasLimit,
|
||||
GasUsed: payloadGasUsed,
|
||||
Timestamp: payloadTimestamp,
|
||||
ExtraData: payloadExtraData,
|
||||
BaseFeePerGas: payloadBaseFeePerGas,
|
||||
BlockHash: payloadBlockHash,
|
||||
Transactions: payloadTxs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ExecutionPayloadHeaderFromConsensus(payload *enginev1.ExecutionPayloadHeader) (*ExecutionPayloadHeader, error) {
|
||||
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ExecutionPayloadHeader{
|
||||
ParentHash: hexutil.Encode(payload.ParentHash),
|
||||
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
|
||||
StateRoot: hexutil.Encode(payload.StateRoot),
|
||||
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
|
||||
LogsBloom: hexutil.Encode(payload.LogsBloom),
|
||||
PrevRandao: hexutil.Encode(payload.PrevRandao),
|
||||
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
|
||||
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
|
||||
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
|
||||
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
|
||||
ExtraData: hexutil.Encode(payload.ExtraData),
|
||||
BaseFeePerGas: baseFeePerGas,
|
||||
BlockHash: hexutil.Encode(payload.BlockHash),
|
||||
TransactionsRoot: hexutil.Encode(payload.TransactionsRoot),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e *ExecutionPayloadHeader) ToConsensus() (*enginev1.ExecutionPayloadHeader, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionPayloadHeader")
|
||||
}
|
||||
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ParentHash")
|
||||
}
|
||||
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.FeeRecipient")
|
||||
}
|
||||
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.StateRoot")
|
||||
}
|
||||
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ReceiptsRoot")
|
||||
}
|
||||
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.LogsBloom")
|
||||
}
|
||||
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.PrevRandao")
|
||||
}
|
||||
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockNumber")
|
||||
}
|
||||
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasLimit")
|
||||
}
|
||||
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasUsed")
|
||||
}
|
||||
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.Timestamp")
|
||||
}
|
||||
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ExtraData")
|
||||
}
|
||||
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BaseFeePerGas")
|
||||
}
|
||||
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockHash")
|
||||
}
|
||||
payloadTxsRoot, err := bytesutil.DecodeHexWithLength(e.TransactionsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.TransactionsRoot")
|
||||
}
|
||||
|
||||
return &enginev1.ExecutionPayloadHeader{
|
||||
ParentHash: payloadParentHash,
|
||||
FeeRecipient: payloadFeeRecipient,
|
||||
StateRoot: payloadStateRoot,
|
||||
ReceiptsRoot: payloadReceiptsRoot,
|
||||
LogsBloom: payloadLogsBloom,
|
||||
PrevRandao: payloadPrevRandao,
|
||||
BlockNumber: payloadBlockNumber,
|
||||
GasLimit: payloadGasLimit,
|
||||
GasUsed: payloadGasUsed,
|
||||
Timestamp: payloadTimestamp,
|
||||
ExtraData: payloadExtraData,
|
||||
BaseFeePerGas: payloadBaseFeePerGas,
|
||||
BlockHash: payloadBlockHash,
|
||||
TransactionsRoot: payloadTxsRoot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Capella
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
func ExecutionPayloadCapellaFromConsensus(payload *enginev1.ExecutionPayloadCapella) (*ExecutionPayloadCapella, error) {
|
||||
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactions := make([]string, len(payload.Transactions))
|
||||
for i, tx := range payload.Transactions {
|
||||
transactions[i] = hexutil.Encode(tx)
|
||||
}
|
||||
|
||||
return &ExecutionPayloadCapella{
|
||||
ParentHash: hexutil.Encode(payload.ParentHash),
|
||||
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
|
||||
StateRoot: hexutil.Encode(payload.StateRoot),
|
||||
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
|
||||
LogsBloom: hexutil.Encode(payload.LogsBloom),
|
||||
PrevRandao: hexutil.Encode(payload.PrevRandao),
|
||||
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
|
||||
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
|
||||
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
|
||||
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
|
||||
ExtraData: hexutil.Encode(payload.ExtraData),
|
||||
BaseFeePerGas: baseFeePerGas,
|
||||
BlockHash: hexutil.Encode(payload.BlockHash),
|
||||
Transactions: transactions,
|
||||
Withdrawals: WithdrawalsFromConsensus(payload.Withdrawals),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e *ExecutionPayloadCapella) ToConsensus() (*enginev1.ExecutionPayloadCapella, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionPayload")
|
||||
}
|
||||
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ParentHash")
|
||||
}
|
||||
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.FeeRecipient")
|
||||
}
|
||||
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.StateRoot")
|
||||
}
|
||||
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ReceiptsRoot")
|
||||
}
|
||||
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.LogsBloom")
|
||||
}
|
||||
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.PrevRandao")
|
||||
}
|
||||
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockNumber")
|
||||
}
|
||||
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.GasLimit")
|
||||
}
|
||||
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.GasUsed")
|
||||
}
|
||||
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Timestamp")
|
||||
}
|
||||
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ExtraData")
|
||||
}
|
||||
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BaseFeePerGas")
|
||||
}
|
||||
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockHash")
|
||||
}
|
||||
err = slice.VerifyMaxLength(e.Transactions, fieldparams.MaxTxsPerPayloadLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Transactions")
|
||||
}
|
||||
payloadTxs := make([][]byte, len(e.Transactions))
|
||||
for i, tx := range e.Transactions {
|
||||
payloadTxs[i], err = bytesutil.DecodeHexWithMaxLength(tx, fieldparams.MaxBytesPerTxLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Transactions[%d]", i))
|
||||
}
|
||||
}
|
||||
err = slice.VerifyMaxLength(e.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Withdrawals")
|
||||
}
|
||||
withdrawals := make([]*enginev1.Withdrawal, len(e.Withdrawals))
|
||||
for i, w := range e.Withdrawals {
|
||||
withdrawalIndex, err := strconv.ParseUint(w.WithdrawalIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].WithdrawalIndex", i))
|
||||
}
|
||||
validatorIndex, err := strconv.ParseUint(w.ValidatorIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].ValidatorIndex", i))
|
||||
}
|
||||
address, err := bytesutil.DecodeHexWithLength(w.ExecutionAddress, common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].ExecutionAddress", i))
|
||||
}
|
||||
amount, err := strconv.ParseUint(w.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].Amount", i))
|
||||
}
|
||||
withdrawals[i] = &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: primitives.ValidatorIndex(validatorIndex),
|
||||
Address: address,
|
||||
Amount: amount,
|
||||
}
|
||||
}
|
||||
return &enginev1.ExecutionPayloadCapella{
|
||||
ParentHash: payloadParentHash,
|
||||
FeeRecipient: payloadFeeRecipient,
|
||||
StateRoot: payloadStateRoot,
|
||||
ReceiptsRoot: payloadReceiptsRoot,
|
||||
LogsBloom: payloadLogsBloom,
|
||||
PrevRandao: payloadPrevRandao,
|
||||
BlockNumber: payloadBlockNumber,
|
||||
GasLimit: payloadGasLimit,
|
||||
GasUsed: payloadGasUsed,
|
||||
Timestamp: payloadTimestamp,
|
||||
ExtraData: payloadExtraData,
|
||||
BaseFeePerGas: payloadBaseFeePerGas,
|
||||
BlockHash: payloadBlockHash,
|
||||
Transactions: payloadTxs,
|
||||
Withdrawals: withdrawals,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ExecutionPayloadHeaderCapellaFromConsensus(payload *enginev1.ExecutionPayloadHeaderCapella) (*ExecutionPayloadHeaderCapella, error) {
|
||||
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ExecutionPayloadHeaderCapella{
|
||||
ParentHash: hexutil.Encode(payload.ParentHash),
|
||||
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
|
||||
StateRoot: hexutil.Encode(payload.StateRoot),
|
||||
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
|
||||
LogsBloom: hexutil.Encode(payload.LogsBloom),
|
||||
PrevRandao: hexutil.Encode(payload.PrevRandao),
|
||||
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
|
||||
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
|
||||
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
|
||||
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
|
||||
ExtraData: hexutil.Encode(payload.ExtraData),
|
||||
BaseFeePerGas: baseFeePerGas,
|
||||
BlockHash: hexutil.Encode(payload.BlockHash),
|
||||
TransactionsRoot: hexutil.Encode(payload.TransactionsRoot),
|
||||
WithdrawalsRoot: hexutil.Encode(payload.WithdrawalsRoot),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e *ExecutionPayloadHeaderCapella) ToConsensus() (*enginev1.ExecutionPayloadHeaderCapella, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionPayloadHeader")
|
||||
}
|
||||
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ParentHash")
|
||||
}
|
||||
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.FeeRecipient")
|
||||
}
|
||||
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.StateRoot")
|
||||
}
|
||||
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ReceiptsRoot")
|
||||
}
|
||||
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.LogsBloom")
|
||||
}
|
||||
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.PrevRandao")
|
||||
}
|
||||
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockNumber")
|
||||
}
|
||||
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasLimit")
|
||||
}
|
||||
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasUsed")
|
||||
}
|
||||
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.Timestamp")
|
||||
}
|
||||
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ExtraData")
|
||||
}
|
||||
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BaseFeePerGas")
|
||||
}
|
||||
payloadBlockHash, err := bytesutil.DecodeHexWithMaxLength(e.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockHash")
|
||||
}
|
||||
payloadTxsRoot, err := bytesutil.DecodeHexWithMaxLength(e.TransactionsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.TransactionsRoot")
|
||||
}
|
||||
payloadWithdrawalsRoot, err := bytesutil.DecodeHexWithMaxLength(e.WithdrawalsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.WithdrawalsRoot")
|
||||
}
|
||||
return &enginev1.ExecutionPayloadHeaderCapella{
|
||||
ParentHash: payloadParentHash,
|
||||
FeeRecipient: payloadFeeRecipient,
|
||||
StateRoot: payloadStateRoot,
|
||||
ReceiptsRoot: payloadReceiptsRoot,
|
||||
LogsBloom: payloadLogsBloom,
|
||||
PrevRandao: payloadPrevRandao,
|
||||
BlockNumber: payloadBlockNumber,
|
||||
GasLimit: payloadGasLimit,
|
||||
GasUsed: payloadGasUsed,
|
||||
Timestamp: payloadTimestamp,
|
||||
ExtraData: payloadExtraData,
|
||||
BaseFeePerGas: payloadBaseFeePerGas,
|
||||
BlockHash: payloadBlockHash,
|
||||
TransactionsRoot: payloadTxsRoot,
|
||||
WithdrawalsRoot: payloadWithdrawalsRoot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Deneb
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
func ExecutionPayloadDenebFromConsensus(payload *enginev1.ExecutionPayloadDeneb) (*ExecutionPayloadDeneb, error) {
|
||||
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactions := make([]string, len(payload.Transactions))
|
||||
for i, tx := range payload.Transactions {
|
||||
transactions[i] = hexutil.Encode(tx)
|
||||
}
|
||||
|
||||
return &ExecutionPayloadDeneb{
|
||||
ParentHash: hexutil.Encode(payload.ParentHash),
|
||||
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
|
||||
StateRoot: hexutil.Encode(payload.StateRoot),
|
||||
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
|
||||
LogsBloom: hexutil.Encode(payload.LogsBloom),
|
||||
PrevRandao: hexutil.Encode(payload.PrevRandao),
|
||||
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
|
||||
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
|
||||
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
|
||||
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
|
||||
ExtraData: hexutil.Encode(payload.ExtraData),
|
||||
BaseFeePerGas: baseFeePerGas,
|
||||
BlockHash: hexutil.Encode(payload.BlockHash),
|
||||
Transactions: transactions,
|
||||
Withdrawals: WithdrawalsFromConsensus(payload.Withdrawals),
|
||||
BlobGasUsed: fmt.Sprintf("%d", payload.BlobGasUsed),
|
||||
ExcessBlobGas: fmt.Sprintf("%d", payload.ExcessBlobGas),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e *ExecutionPayloadDeneb) ToConsensus() (*enginev1.ExecutionPayloadDeneb, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionPayload")
|
||||
}
|
||||
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ParentHash")
|
||||
}
|
||||
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.FeeRecipient")
|
||||
}
|
||||
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.StateRoot")
|
||||
}
|
||||
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ReceiptsRoot")
|
||||
}
|
||||
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.LogsBloom")
|
||||
}
|
||||
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.PrevRandao")
|
||||
}
|
||||
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockNumber")
|
||||
}
|
||||
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.GasLimit")
|
||||
}
|
||||
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.GasUsed")
|
||||
}
|
||||
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.Timestamp")
|
||||
}
|
||||
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ExtraData")
|
||||
}
|
||||
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BaseFeePerGas")
|
||||
}
|
||||
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockHash")
|
||||
}
|
||||
err = slice.VerifyMaxLength(e.Transactions, fieldparams.MaxTxsPerPayloadLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Transactions")
|
||||
}
|
||||
txs := make([][]byte, len(e.Transactions))
|
||||
for i, tx := range e.Transactions {
|
||||
txs[i], err = bytesutil.DecodeHexWithMaxLength(tx, fieldparams.MaxBytesPerTxLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Transactions[%d]", i))
|
||||
}
|
||||
}
|
||||
err = slice.VerifyMaxLength(e.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Withdrawals")
|
||||
}
|
||||
withdrawals := make([]*enginev1.Withdrawal, len(e.Withdrawals))
|
||||
for i, w := range e.Withdrawals {
|
||||
withdrawalIndex, err := strconv.ParseUint(w.WithdrawalIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].WithdrawalIndex", i))
|
||||
}
|
||||
validatorIndex, err := strconv.ParseUint(w.ValidatorIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].ValidatorIndex", i))
|
||||
}
|
||||
address, err := bytesutil.DecodeHexWithLength(w.ExecutionAddress, common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].ExecutionAddress", i))
|
||||
}
|
||||
amount, err := strconv.ParseUint(w.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].Amount", i))
|
||||
}
|
||||
withdrawals[i] = &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: primitives.ValidatorIndex(validatorIndex),
|
||||
Address: address,
|
||||
Amount: amount,
|
||||
}
|
||||
}
|
||||
|
||||
payloadBlobGasUsed, err := strconv.ParseUint(e.BlobGasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlobGasUsed")
|
||||
}
|
||||
payloadExcessBlobGas, err := strconv.ParseUint(e.ExcessBlobGas, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ExcessBlobGas")
|
||||
}
|
||||
return &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: payloadParentHash,
|
||||
FeeRecipient: payloadFeeRecipient,
|
||||
StateRoot: payloadStateRoot,
|
||||
ReceiptsRoot: payloadReceiptsRoot,
|
||||
LogsBloom: payloadLogsBloom,
|
||||
PrevRandao: payloadPrevRandao,
|
||||
BlockNumber: payloadBlockNumber,
|
||||
GasLimit: payloadGasLimit,
|
||||
GasUsed: payloadGasUsed,
|
||||
Timestamp: payloadTimestamp,
|
||||
ExtraData: payloadExtraData,
|
||||
BaseFeePerGas: payloadBaseFeePerGas,
|
||||
BlockHash: payloadBlockHash,
|
||||
Transactions: txs,
|
||||
Withdrawals: withdrawals,
|
||||
BlobGasUsed: payloadBlobGasUsed,
|
||||
ExcessBlobGas: payloadExcessBlobGas,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ExecutionPayloadHeaderDenebFromConsensus(payload *enginev1.ExecutionPayloadHeaderDeneb) (*ExecutionPayloadHeaderDeneb, error) {
|
||||
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: hexutil.Encode(payload.ParentHash),
|
||||
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
|
||||
StateRoot: hexutil.Encode(payload.StateRoot),
|
||||
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
|
||||
LogsBloom: hexutil.Encode(payload.LogsBloom),
|
||||
PrevRandao: hexutil.Encode(payload.PrevRandao),
|
||||
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
|
||||
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
|
||||
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
|
||||
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
|
||||
ExtraData: hexutil.Encode(payload.ExtraData),
|
||||
BaseFeePerGas: baseFeePerGas,
|
||||
BlockHash: hexutil.Encode(payload.BlockHash),
|
||||
TransactionsRoot: hexutil.Encode(payload.TransactionsRoot),
|
||||
WithdrawalsRoot: hexutil.Encode(payload.WithdrawalsRoot),
|
||||
BlobGasUsed: fmt.Sprintf("%d", payload.BlobGasUsed),
|
||||
ExcessBlobGas: fmt.Sprintf("%d", payload.ExcessBlobGas),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e *ExecutionPayloadHeaderDeneb) ToConsensus() (*enginev1.ExecutionPayloadHeaderDeneb, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionPayloadHeader")
|
||||
}
|
||||
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ParentHash")
|
||||
}
|
||||
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.FeeRecipient")
|
||||
}
|
||||
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.StateRoot")
|
||||
}
|
||||
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ReceiptsRoot")
|
||||
}
|
||||
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.LogsBloom")
|
||||
}
|
||||
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.PrevRandao")
|
||||
}
|
||||
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockNumber")
|
||||
}
|
||||
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasLimit")
|
||||
}
|
||||
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasUsed")
|
||||
}
|
||||
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.Timestamp")
|
||||
}
|
||||
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ExtraData")
|
||||
}
|
||||
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BaseFeePerGas")
|
||||
}
|
||||
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockHash")
|
||||
}
|
||||
payloadTxsRoot, err := bytesutil.DecodeHexWithLength(e.TransactionsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.TransactionsRoot")
|
||||
}
|
||||
payloadWithdrawalsRoot, err := bytesutil.DecodeHexWithLength(e.WithdrawalsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.WithdrawalsRoot")
|
||||
}
|
||||
payloadBlobGasUsed, err := strconv.ParseUint(e.BlobGasUsed, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlobGasUsed")
}
payloadExcessBlobGas, err := strconv.ParseUint(e.ExcessBlobGas, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ExcessBlobGas")
}
|
||||
return &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: payloadParentHash,
|
||||
FeeRecipient: payloadFeeRecipient,
|
||||
StateRoot: payloadStateRoot,
|
||||
ReceiptsRoot: payloadReceiptsRoot,
|
||||
LogsBloom: payloadLogsBloom,
|
||||
PrevRandao: payloadPrevRandao,
|
||||
BlockNumber: payloadBlockNumber,
|
||||
GasLimit: payloadGasLimit,
|
||||
GasUsed: payloadGasUsed,
|
||||
Timestamp: payloadTimestamp,
|
||||
ExtraData: payloadExtraData,
|
||||
BaseFeePerGas: payloadBaseFeePerGas,
|
||||
BlockHash: payloadBlockHash,
|
||||
TransactionsRoot: payloadTxsRoot,
|
||||
WithdrawalsRoot: payloadWithdrawalsRoot,
|
||||
BlobGasUsed: payloadBlobGasUsed,
|
||||
ExcessBlobGas: payloadExcessBlobGas,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Electra
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
var (
|
||||
ExecutionPayloadElectraFromConsensus = ExecutionPayloadDenebFromConsensus
|
||||
ExecutionPayloadHeaderElectraFromConsensus = ExecutionPayloadHeaderDenebFromConsensus
|
||||
)
|
||||
|
||||
func WithdrawalRequestsFromConsensus(ws []*enginev1.WithdrawalRequest) []*WithdrawalRequest {
|
||||
result := make([]*WithdrawalRequest, len(ws))
|
||||
for i, w := range ws {
|
||||
result[i] = WithdrawalRequestFromConsensus(w)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func WithdrawalRequestFromConsensus(w *enginev1.WithdrawalRequest) *WithdrawalRequest {
|
||||
return &WithdrawalRequest{
|
||||
SourceAddress: hexutil.Encode(w.SourceAddress),
|
||||
ValidatorPubkey: hexutil.Encode(w.ValidatorPubkey),
|
||||
Amount: fmt.Sprintf("%d", w.Amount),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WithdrawalRequest) ToConsensus() (*enginev1.WithdrawalRequest, error) {
|
||||
if w == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "WithdrawalRequest")
|
||||
}
|
||||
src, err := bytesutil.DecodeHexWithLength(w.SourceAddress, common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SourceAddress")
|
||||
}
|
||||
pubkey, err := bytesutil.DecodeHexWithLength(w.ValidatorPubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ValidatorPubkey")
|
||||
}
|
||||
amount, err := strconv.ParseUint(w.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Amount")
|
||||
}
|
||||
return &enginev1.WithdrawalRequest{
|
||||
SourceAddress: src,
|
||||
ValidatorPubkey: pubkey,
|
||||
Amount: amount,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ConsolidationRequestsFromConsensus(cs []*enginev1.ConsolidationRequest) []*ConsolidationRequest {
|
||||
result := make([]*ConsolidationRequest, len(cs))
|
||||
for i, c := range cs {
|
||||
result[i] = ConsolidationRequestFromConsensus(c)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func ConsolidationRequestFromConsensus(c *enginev1.ConsolidationRequest) *ConsolidationRequest {
|
||||
return &ConsolidationRequest{
|
||||
SourceAddress: hexutil.Encode(c.SourceAddress),
|
||||
SourcePubkey: hexutil.Encode(c.SourcePubkey),
|
||||
TargetPubkey: hexutil.Encode(c.TargetPubkey),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ConsolidationRequest) ToConsensus() (*enginev1.ConsolidationRequest, error) {
|
||||
if c == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ConsolidationRequest")
|
||||
}
|
||||
srcAddress, err := bytesutil.DecodeHexWithLength(c.SourceAddress, common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SourceAddress")
|
||||
}
|
||||
srcPubkey, err := bytesutil.DecodeHexWithLength(c.SourcePubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SourcePubkey")
|
||||
}
|
||||
targetPubkey, err := bytesutil.DecodeHexWithLength(c.TargetPubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "TargetPubkey")
|
||||
}
|
||||
return &enginev1.ConsolidationRequest{
|
||||
SourceAddress: srcAddress,
|
||||
SourcePubkey: srcPubkey,
|
||||
TargetPubkey: targetPubkey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func DepositRequestsFromConsensus(ds []*enginev1.DepositRequest) []*DepositRequest {
|
||||
result := make([]*DepositRequest, len(ds))
|
||||
for i, d := range ds {
|
||||
result[i] = DepositRequestFromConsensus(d)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func DepositRequestFromConsensus(d *enginev1.DepositRequest) *DepositRequest {
|
||||
return &DepositRequest{
|
||||
Pubkey: hexutil.Encode(d.Pubkey),
|
||||
WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
|
||||
Amount: fmt.Sprintf("%d", d.Amount),
|
||||
Signature: hexutil.Encode(d.Signature),
|
||||
Index: fmt.Sprintf("%d", d.Index),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
|
||||
if d == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "DepositRequest")
|
||||
}
|
||||
pubkey, err := bytesutil.DecodeHexWithLength(d.Pubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Pubkey")
|
||||
}
|
||||
withdrawalCredentials, err := bytesutil.DecodeHexWithLength(d.WithdrawalCredentials, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "WithdrawalCredentials")
|
||||
}
|
||||
amount, err := strconv.ParseUint(d.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Amount")
|
||||
}
|
||||
sig, err := bytesutil.DecodeHexWithLength(d.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
index, err := strconv.ParseUint(d.Index, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Index")
|
||||
}
|
||||
return &enginev1.DepositRequest{
|
||||
Pubkey: pubkey,
|
||||
WithdrawalCredentials: withdrawalCredentials,
|
||||
Amount: amount,
|
||||
Signature: sig,
|
||||
Index: index,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ExecutionRequestsFromConsensus(er *enginev1.ExecutionRequests) *ExecutionRequests {
|
||||
return &ExecutionRequests{
|
||||
Deposits: DepositRequestsFromConsensus(er.Deposits),
|
||||
Withdrawals: WithdrawalRequestsFromConsensus(er.Withdrawals),
|
||||
Consolidations: ConsolidationRequestsFromConsensus(er.Consolidations),
|
||||
}
|
||||
}
|
||||
|
||||
func (e *ExecutionRequests) ToConsensus() (*enginev1.ExecutionRequests, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionRequests")
|
||||
}
|
||||
var err error
|
||||
if err = slice.VerifyMaxLength(e.Deposits, params.BeaconConfig().MaxDepositRequestsPerPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
depositRequests := make([]*enginev1.DepositRequest, len(e.Deposits))
|
||||
for i, d := range e.Deposits {
|
||||
depositRequests[i], err = d.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionRequests.Deposits[%d]", i))
|
||||
}
|
||||
}
|
||||
|
||||
if err = slice.VerifyMaxLength(e.Withdrawals, params.BeaconConfig().MaxWithdrawalRequestsPerPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
withdrawalRequests := make([]*enginev1.WithdrawalRequest, len(e.Withdrawals))
|
||||
for i, w := range e.Withdrawals {
|
||||
withdrawalRequests[i], err = w.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionRequests.Withdrawals[%d]", i))
|
||||
}
|
||||
}
|
||||
|
||||
if err = slice.VerifyMaxLength(e.Consolidations, params.BeaconConfig().MaxConsolidationsRequestsPerPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
consolidationRequests := make([]*enginev1.ConsolidationRequest, len(e.Consolidations))
|
||||
for i, c := range e.Consolidations {
|
||||
consolidationRequests[i], err = c.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionRequests.Consolidations[%d]", i))
|
||||
}
|
||||
}
|
||||
return &enginev1.ExecutionRequests{
|
||||
Deposits: depositRequests,
|
||||
Withdrawals: withdrawalRequests,
|
||||
Consolidations: consolidationRequests,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Fulu
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
var (
|
||||
ExecutionPayloadFuluFromConsensus = ExecutionPayloadDenebFromConsensus
|
||||
ExecutionPayloadHeaderFuluFromConsensus = ExecutionPayloadHeaderDenebFromConsensus
|
||||
BeaconBlockFuluFromConsensus = BeaconBlockElectraFromConsensus
|
||||
)
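A minimal usage sketch (not part of this diff) of how the conversion helpers above are typically paired when decoding an API request; the package path is taken from this repository layout and the function names here are illustrative only:

package example

import (
	"encoding/json"

	"github.com/prysmaticlabs/prysm/v5/api/server/structs"
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

// decodeDenebPayload parses an API JSON body into the consensus representation.
func decodeDenebPayload(body []byte) (*enginev1.ExecutionPayloadDeneb, error) {
	p := &structs.ExecutionPayloadDeneb{}
	if err := json.Unmarshal(body, p); err != nil {
		return nil, err
	}
	// ToConsensus converts the string/hex fields into uint64s and byte slices,
	// returning a DecodeError that names the offending field on failure.
	return p.ToConsensus()
}

// encodeDenebPayload converts a consensus payload back into the API struct.
func encodeDenebPayload(p *enginev1.ExecutionPayloadDeneb) (*structs.ExecutionPayloadDeneb, error) {
	return structs.ExecutionPayloadDenebFromConsensus(p)
}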
|
||||
api/server/structs/conversions_block_execution_test.go (new file, 563 lines)
@@ -0,0 +1,563 @@
|
||||
package structs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func fillByteSlice(sliceLength int, value byte) []byte {
|
||||
bytes := make([]byte, sliceLength)
|
||||
|
||||
for index := range bytes {
|
||||
bytes[index] = value
|
||||
}
|
||||
|
||||
return bytes
|
||||
}
|
||||
|
||||
// TestExecutionPayloadFromConsensus_HappyPath checks the
|
||||
// ExecutionPayloadFromConsensus function under normal conditions.
|
||||
func TestExecutionPayloadFromConsensus_HappyPath(t *testing.T) {
|
||||
consensusPayload := &enginev1.ExecutionPayload{
|
||||
ParentHash: fillByteSlice(common.HashLength, 0xaa),
|
||||
FeeRecipient: fillByteSlice(20, 0xbb),
|
||||
StateRoot: fillByteSlice(32, 0xcc),
|
||||
ReceiptsRoot: fillByteSlice(32, 0xdd),
|
||||
LogsBloom: fillByteSlice(256, 0xee),
|
||||
PrevRandao: fillByteSlice(32, 0xff),
|
||||
BlockNumber: 12345,
|
||||
GasLimit: 15000000,
|
||||
GasUsed: 8000000,
|
||||
Timestamp: 1680000000,
|
||||
ExtraData: fillByteSlice(8, 0x11),
|
||||
BaseFeePerGas: fillByteSlice(32, 0x01),
|
||||
BlockHash: fillByteSlice(common.HashLength, 0x22),
|
||||
Transactions: [][]byte{
|
||||
fillByteSlice(10, 0x33),
|
||||
fillByteSlice(10, 0x44),
|
||||
},
|
||||
}
|
||||
|
||||
result, err := ExecutionPayloadFromConsensus(consensusPayload)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, hexutil.Encode(consensusPayload.ParentHash), result.ParentHash)
|
||||
require.Equal(t, hexutil.Encode(consensusPayload.FeeRecipient), result.FeeRecipient)
|
||||
require.Equal(t, hexutil.Encode(consensusPayload.StateRoot), result.StateRoot)
|
||||
require.Equal(t, hexutil.Encode(consensusPayload.ReceiptsRoot), result.ReceiptsRoot)
|
||||
require.Equal(t, fmt.Sprintf("%d", consensusPayload.BlockNumber), result.BlockNumber)
|
||||
}
|
||||
|
||||
// TestExecutionPayload_ToConsensus_HappyPath checks the
|
||||
// (*ExecutionPayload).ToConsensus function under normal conditions.
|
||||
func TestExecutionPayload_ToConsensus_HappyPath(t *testing.T) {
|
||||
payload := &ExecutionPayload{
|
||||
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
|
||||
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
|
||||
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
|
||||
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
|
||||
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
|
||||
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
|
||||
BlockNumber: "12345",
|
||||
GasLimit: "15000000",
|
||||
GasUsed: "8000000",
|
||||
Timestamp: "1680000000",
|
||||
ExtraData: "0x11111111",
|
||||
BaseFeePerGas: "1234",
|
||||
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x22)),
|
||||
Transactions: []string{
|
||||
hexutil.Encode(fillByteSlice(10, 0x33)),
|
||||
hexutil.Encode(fillByteSlice(10, 0x44)),
|
||||
},
|
||||
}
|
||||
|
||||
result, err := payload.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, result.ParentHash, fillByteSlice(common.HashLength, 0xaa))
|
||||
require.DeepEqual(t, result.FeeRecipient, fillByteSlice(20, 0xbb))
|
||||
require.DeepEqual(t, result.StateRoot, fillByteSlice(32, 0xcc))
|
||||
}
|
||||
|
||||
// TestExecutionPayloadHeaderFromConsensus_HappyPath checks the
|
||||
// ExecutionPayloadHeaderFromConsensus function under normal conditions.
|
||||
func TestExecutionPayloadHeaderFromConsensus_HappyPath(t *testing.T) {
|
||||
consensusHeader := &enginev1.ExecutionPayloadHeader{
|
||||
ParentHash: fillByteSlice(common.HashLength, 0xaa),
|
||||
FeeRecipient: fillByteSlice(20, 0xbb),
|
||||
StateRoot: fillByteSlice(32, 0xcc),
|
||||
ReceiptsRoot: fillByteSlice(32, 0xdd),
|
||||
LogsBloom: fillByteSlice(256, 0xee),
|
||||
PrevRandao: fillByteSlice(32, 0xff),
|
||||
BlockNumber: 9999,
|
||||
GasLimit: 5000000,
|
||||
GasUsed: 2500000,
|
||||
Timestamp: 1111111111,
|
||||
ExtraData: fillByteSlice(4, 0x12),
|
||||
BaseFeePerGas: fillByteSlice(32, 0x34),
|
||||
BlockHash: fillByteSlice(common.HashLength, 0x56),
|
||||
TransactionsRoot: fillByteSlice(32, 0x78),
|
||||
}
|
||||
|
||||
result, err := ExecutionPayloadHeaderFromConsensus(consensusHeader)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, hexutil.Encode(consensusHeader.ParentHash), result.ParentHash)
|
||||
require.Equal(t, fmt.Sprintf("%d", consensusHeader.BlockNumber), result.BlockNumber)
|
||||
}
|
||||
|
||||
// TestExecutionPayloadHeader_ToConsensus_HappyPath checks the
|
||||
// (*ExecutionPayloadHeader).ToConsensus function under normal conditions.
|
||||
func TestExecutionPayloadHeader_ToConsensus_HappyPath(t *testing.T) {
|
||||
header := &ExecutionPayloadHeader{
|
||||
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
|
||||
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
|
||||
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
|
||||
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
|
||||
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
|
||||
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
|
||||
BlockNumber: "9999",
|
||||
GasLimit: "5000000",
|
||||
GasUsed: "2500000",
|
||||
Timestamp: "1111111111",
|
||||
ExtraData: "0x1234abcd",
|
||||
BaseFeePerGas: "1234",
|
||||
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x56)),
|
||||
TransactionsRoot: hexutil.Encode(fillByteSlice(32, 0x78)),
|
||||
}
|
||||
|
||||
result, err := header.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hexutil.Encode(result.ParentHash), header.ParentHash)
|
||||
require.DeepEqual(t, hexutil.Encode(result.FeeRecipient), header.FeeRecipient)
|
||||
require.DeepEqual(t, hexutil.Encode(result.StateRoot), header.StateRoot)
|
||||
}
|
||||
|
||||
// TestExecutionPayloadCapellaFromConsensus_HappyPath checks the
|
||||
// ExecutionPayloadCapellaFromConsensus function under normal conditions.
|
||||
func TestExecutionPayloadCapellaFromConsensus_HappyPath(t *testing.T) {
|
||||
capellaPayload := &enginev1.ExecutionPayloadCapella{
|
||||
ParentHash: fillByteSlice(common.HashLength, 0xaa),
|
||||
FeeRecipient: fillByteSlice(20, 0xbb),
|
||||
StateRoot: fillByteSlice(32, 0xcc),
|
||||
ReceiptsRoot: fillByteSlice(32, 0xdd),
|
||||
LogsBloom: fillByteSlice(256, 0xee),
|
||||
PrevRandao: fillByteSlice(32, 0xff),
|
||||
BlockNumber: 123,
|
||||
GasLimit: 9876543,
|
||||
GasUsed: 1234567,
|
||||
Timestamp: 5555555,
|
||||
ExtraData: fillByteSlice(6, 0x11),
|
||||
BaseFeePerGas: fillByteSlice(32, 0x22),
|
||||
BlockHash: fillByteSlice(common.HashLength, 0x33),
|
||||
Transactions: [][]byte{
|
||||
fillByteSlice(5, 0x44),
|
||||
},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
{
|
||||
Index: 1,
|
||||
ValidatorIndex: 2,
|
||||
Address: fillByteSlice(20, 0xaa),
|
||||
Amount: 100,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result, err := ExecutionPayloadCapellaFromConsensus(capellaPayload)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, hexutil.Encode(capellaPayload.ParentHash), result.ParentHash)
|
||||
require.Equal(t, len(capellaPayload.Transactions), len(result.Transactions))
|
||||
require.Equal(t, len(capellaPayload.Withdrawals), len(result.Withdrawals))
|
||||
}
|
||||
|
||||
// TestExecutionPayloadCapella_ToConsensus_HappyPath checks the
|
||||
// (*ExecutionPayloadCapella).ToConsensus function under normal conditions.
|
||||
func TestExecutionPayloadCapella_ToConsensus_HappyPath(t *testing.T) {
|
||||
capella := &ExecutionPayloadCapella{
|
||||
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
|
||||
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
|
||||
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
|
||||
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
|
||||
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
|
||||
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
|
||||
BlockNumber: "123",
|
||||
GasLimit: "9876543",
|
||||
GasUsed: "1234567",
|
||||
Timestamp: "5555555",
|
||||
ExtraData: hexutil.Encode(fillByteSlice(6, 0x11)),
|
||||
BaseFeePerGas: "1234",
|
||||
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x33)),
|
||||
Transactions: []string{
|
||||
hexutil.Encode(fillByteSlice(5, 0x44)),
|
||||
},
|
||||
Withdrawals: []*Withdrawal{
|
||||
{
|
||||
WithdrawalIndex: "1",
|
||||
ValidatorIndex: "2",
|
||||
ExecutionAddress: hexutil.Encode(fillByteSlice(20, 0xaa)),
|
||||
Amount: "100",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result, err := capella.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hexutil.Encode(result.ParentHash), capella.ParentHash)
|
||||
require.DeepEqual(t, hexutil.Encode(result.FeeRecipient), capella.FeeRecipient)
|
||||
require.DeepEqual(t, hexutil.Encode(result.StateRoot), capella.StateRoot)
|
||||
}
|
||||
|
||||
// TestExecutionPayloadDenebFromConsensus_HappyPath checks the
|
||||
// ExecutionPayloadDenebFromConsensus function under normal conditions.
|
||||
func TestExecutionPayloadDenebFromConsensus_HappyPath(t *testing.T) {
|
||||
denebPayload := &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: fillByteSlice(common.HashLength, 0xaa),
|
||||
FeeRecipient: fillByteSlice(20, 0xbb),
|
||||
StateRoot: fillByteSlice(32, 0xcc),
|
||||
ReceiptsRoot: fillByteSlice(32, 0xdd),
|
||||
LogsBloom: fillByteSlice(256, 0xee),
|
||||
PrevRandao: fillByteSlice(32, 0xff),
|
||||
BlockNumber: 999,
|
||||
GasLimit: 2222222,
|
||||
GasUsed: 1111111,
|
||||
Timestamp: 666666,
|
||||
ExtraData: fillByteSlice(6, 0x11),
|
||||
BaseFeePerGas: fillByteSlice(32, 0x22),
|
||||
BlockHash: fillByteSlice(common.HashLength, 0x33),
|
||||
Transactions: [][]byte{
|
||||
fillByteSlice(5, 0x44),
|
||||
},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
{
|
||||
Index: 1,
|
||||
ValidatorIndex: 2,
|
||||
Address: fillByteSlice(20, 0xaa),
|
||||
Amount: 100,
|
||||
},
|
||||
},
|
||||
BlobGasUsed: 1234,
|
||||
ExcessBlobGas: 5678,
|
||||
}
|
||||
|
||||
result, err := ExecutionPayloadDenebFromConsensus(denebPayload)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, hexutil.Encode(denebPayload.ParentHash), result.ParentHash)
|
||||
require.Equal(t, len(denebPayload.Transactions), len(result.Transactions))
|
||||
require.Equal(t, len(denebPayload.Withdrawals), len(result.Withdrawals))
|
||||
require.Equal(t, "1234", result.BlobGasUsed)
|
||||
require.Equal(t, fmt.Sprintf("%d", denebPayload.BlockNumber), result.BlockNumber)
|
||||
}
|
||||
|
||||
// TestExecutionPayloadDeneb_ToConsensus_HappyPath checks the
|
||||
// (*ExecutionPayloadDeneb).ToConsensus function under normal conditions.
|
||||
func TestExecutionPayloadDeneb_ToConsensus_HappyPath(t *testing.T) {
|
||||
deneb := &ExecutionPayloadDeneb{
|
||||
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
|
||||
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
|
||||
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
|
||||
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
|
||||
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
|
||||
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
|
||||
BlockNumber: "999",
|
||||
GasLimit: "2222222",
|
||||
GasUsed: "1111111",
|
||||
Timestamp: "666666",
|
||||
ExtraData: hexutil.Encode(fillByteSlice(6, 0x11)),
|
||||
BaseFeePerGas: "1234",
|
||||
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x33)),
|
||||
Transactions: []string{
|
||||
hexutil.Encode(fillByteSlice(5, 0x44)),
|
||||
},
|
||||
Withdrawals: []*Withdrawal{
|
||||
{
|
||||
WithdrawalIndex: "1",
|
||||
ValidatorIndex: "2",
|
||||
ExecutionAddress: hexutil.Encode(fillByteSlice(20, 0xaa)),
|
||||
Amount: "100",
|
||||
},
|
||||
},
|
||||
BlobGasUsed: "1234",
|
||||
ExcessBlobGas: "5678",
|
||||
}
|
||||
|
||||
result, err := deneb.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hexutil.Encode(result.ParentHash), deneb.ParentHash)
|
||||
require.DeepEqual(t, hexutil.Encode(result.FeeRecipient), deneb.FeeRecipient)
|
||||
require.Equal(t, result.BlockNumber, uint64(999))
|
||||
}
|
||||
|
||||
func TestExecutionPayloadHeaderCapellaFromConsensus_HappyPath(t *testing.T) {
|
||||
capellaHeader := &enginev1.ExecutionPayloadHeaderCapella{
|
||||
ParentHash: fillByteSlice(common.HashLength, 0xaa),
|
||||
FeeRecipient: fillByteSlice(20, 0xbb),
|
||||
StateRoot: fillByteSlice(32, 0xcc),
|
||||
ReceiptsRoot: fillByteSlice(32, 0xdd),
|
||||
LogsBloom: fillByteSlice(256, 0xee),
|
||||
PrevRandao: fillByteSlice(32, 0xff),
|
||||
BlockNumber: 555,
|
||||
GasLimit: 1111111,
|
||||
GasUsed: 222222,
|
||||
Timestamp: 3333333333,
|
||||
ExtraData: fillByteSlice(4, 0x12),
|
||||
BaseFeePerGas: fillByteSlice(32, 0x34),
|
||||
BlockHash: fillByteSlice(common.HashLength, 0x56),
|
||||
TransactionsRoot: fillByteSlice(32, 0x78),
|
||||
WithdrawalsRoot: fillByteSlice(32, 0x99),
|
||||
}
|
||||
|
||||
result, err := ExecutionPayloadHeaderCapellaFromConsensus(capellaHeader)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, hexutil.Encode(capellaHeader.ParentHash), result.ParentHash)
|
||||
require.DeepEqual(t, hexutil.Encode(capellaHeader.WithdrawalsRoot), result.WithdrawalsRoot)
|
||||
}
|
||||
|
||||
func TestExecutionPayloadHeaderCapella_ToConsensus_HappyPath(t *testing.T) {
|
||||
header := &ExecutionPayloadHeaderCapella{
|
||||
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
|
||||
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
|
||||
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
|
||||
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
|
||||
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
|
||||
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
|
||||
BlockNumber: "555",
|
||||
GasLimit: "1111111",
|
||||
GasUsed: "222222",
|
||||
Timestamp: "3333333333",
|
||||
ExtraData: "0x1234abcd",
|
||||
BaseFeePerGas: "1234",
|
||||
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x56)),
|
||||
TransactionsRoot: hexutil.Encode(fillByteSlice(32, 0x78)),
|
||||
WithdrawalsRoot: hexutil.Encode(fillByteSlice(32, 0x99)),
|
||||
}
|
||||
|
||||
result, err := header.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hexutil.Encode(result.ParentHash), header.ParentHash)
|
||||
require.DeepEqual(t, hexutil.Encode(result.FeeRecipient), header.FeeRecipient)
|
||||
require.DeepEqual(t, hexutil.Encode(result.StateRoot), header.StateRoot)
|
||||
require.DeepEqual(t, hexutil.Encode(result.ReceiptsRoot), header.ReceiptsRoot)
|
||||
require.DeepEqual(t, hexutil.Encode(result.WithdrawalsRoot), header.WithdrawalsRoot)
|
||||
}
|
||||
|
||||
func TestExecutionPayloadHeaderDenebFromConsensus_HappyPath(t *testing.T) {
|
||||
denebHeader := &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: fillByteSlice(common.HashLength, 0xaa),
|
||||
FeeRecipient: fillByteSlice(20, 0xbb),
|
||||
StateRoot: fillByteSlice(32, 0xcc),
|
||||
ReceiptsRoot: fillByteSlice(32, 0xdd),
|
||||
LogsBloom: fillByteSlice(256, 0xee),
|
||||
PrevRandao: fillByteSlice(32, 0xff),
|
||||
BlockNumber: 999,
|
||||
GasLimit: 5000000,
|
||||
GasUsed: 2500000,
|
||||
Timestamp: 4444444444,
|
||||
ExtraData: fillByteSlice(4, 0x12),
|
||||
BaseFeePerGas: fillByteSlice(32, 0x34),
|
||||
BlockHash: fillByteSlice(common.HashLength, 0x56),
|
||||
TransactionsRoot: fillByteSlice(32, 0x78),
|
||||
WithdrawalsRoot: fillByteSlice(32, 0x99),
|
||||
BlobGasUsed: 1234,
|
||||
ExcessBlobGas: 5678,
|
||||
}
|
||||
|
||||
result, err := ExecutionPayloadHeaderDenebFromConsensus(denebHeader)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, hexutil.Encode(denebHeader.ParentHash), result.ParentHash)
|
||||
require.DeepEqual(t, hexutil.Encode(denebHeader.FeeRecipient), result.FeeRecipient)
|
||||
require.DeepEqual(t, hexutil.Encode(denebHeader.StateRoot), result.StateRoot)
|
||||
require.DeepEqual(t, fmt.Sprintf("%d", denebHeader.BlobGasUsed), result.BlobGasUsed)
|
||||
}
|
||||
|
||||
func TestExecutionPayloadHeaderDeneb_ToConsensus_HappyPath(t *testing.T) {
|
||||
header := &ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
|
||||
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
|
||||
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
|
||||
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
|
||||
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
|
||||
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
|
||||
BlockNumber: "999",
|
||||
GasLimit: "5000000",
|
||||
GasUsed: "2500000",
|
||||
Timestamp: "4444444444",
|
||||
ExtraData: "0x1234abcd",
|
||||
BaseFeePerGas: "1234",
|
||||
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x56)),
|
||||
TransactionsRoot: hexutil.Encode(fillByteSlice(32, 0x78)),
|
||||
WithdrawalsRoot: hexutil.Encode(fillByteSlice(32, 0x99)),
|
||||
BlobGasUsed: "1234",
|
||||
ExcessBlobGas: "5678",
|
||||
}
|
||||
|
||||
result, err := header.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hexutil.Encode(result.ParentHash), header.ParentHash)
|
||||
require.DeepEqual(t, result.BlobGasUsed, uint64(1234))
|
||||
require.DeepEqual(t, result.ExcessBlobGas, uint64(5678))
|
||||
require.DeepEqual(t, result.BlockNumber, uint64(999))
|
||||
}
|
||||
|
||||
func TestWithdrawalRequestsFromConsensus_HappyPath(t *testing.T) {
|
||||
consensusRequests := []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: fillByteSlice(20, 0xbb),
|
||||
ValidatorPubkey: fillByteSlice(48, 0xbb),
|
||||
Amount: 12345,
|
||||
},
|
||||
{
|
||||
SourceAddress: fillByteSlice(20, 0xcc),
|
||||
ValidatorPubkey: fillByteSlice(48, 0xcc),
|
||||
Amount: 54321,
|
||||
},
|
||||
}
|
||||
|
||||
result := WithdrawalRequestsFromConsensus(consensusRequests)
|
||||
require.DeepEqual(t, len(result), len(consensusRequests))
|
||||
require.DeepEqual(t, result[0].Amount, fmt.Sprintf("%d", consensusRequests[0].Amount))
|
||||
}
|
||||
|
||||
func TestWithdrawalRequestFromConsensus_HappyPath(t *testing.T) {
|
||||
req := &enginev1.WithdrawalRequest{
|
||||
SourceAddress: fillByteSlice(20, 0xbb),
|
||||
ValidatorPubkey: fillByteSlice(48, 0xbb),
|
||||
Amount: 42,
|
||||
}
|
||||
result := WithdrawalRequestFromConsensus(req)
|
||||
require.NotNil(t, result)
|
||||
require.DeepEqual(t, result.SourceAddress, hexutil.Encode(fillByteSlice(20, 0xbb)))
|
||||
}
|
||||
|
||||
func TestWithdrawalRequest_ToConsensus_HappyPath(t *testing.T) {
|
||||
withdrawalReq := &WithdrawalRequest{
|
||||
SourceAddress: hexutil.Encode(fillByteSlice(20, 111)),
|
||||
ValidatorPubkey: hexutil.Encode(fillByteSlice(48, 123)),
|
||||
Amount: "12345",
|
||||
}
|
||||
result, err := withdrawalReq.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, result.Amount, uint64(12345))
|
||||
}
|
||||
|
||||
func TestConsolidationRequestsFromConsensus_HappyPath(t *testing.T) {
|
||||
consensusRequests := []*enginev1.ConsolidationRequest{
|
||||
{
|
||||
SourceAddress: fillByteSlice(20, 111),
|
||||
SourcePubkey: fillByteSlice(48, 112),
|
||||
TargetPubkey: fillByteSlice(48, 113),
|
||||
},
|
||||
}
|
||||
result := ConsolidationRequestsFromConsensus(consensusRequests)
|
||||
require.DeepEqual(t, len(result), len(consensusRequests))
|
||||
require.DeepEqual(t, result[0].SourceAddress, "0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f")
|
||||
}
|
||||
|
||||
func TestDepositRequestsFromConsensus_HappyPath(t *testing.T) {
|
||||
ds := []*enginev1.DepositRequest{
|
||||
{
|
||||
Pubkey: fillByteSlice(48, 0xbb),
|
||||
WithdrawalCredentials: fillByteSlice(32, 0xdd),
|
||||
Amount: 98765,
|
||||
Signature: fillByteSlice(96, 0xff),
|
||||
Index: 111,
|
||||
},
|
||||
}
|
||||
result := DepositRequestsFromConsensus(ds)
|
||||
require.DeepEqual(t, len(result), len(ds))
|
||||
require.DeepEqual(t, result[0].Amount, "98765")
|
||||
}
|
||||
|
||||
func TestDepositRequest_ToConsensus_HappyPath(t *testing.T) {
|
||||
req := &DepositRequest{
|
||||
Pubkey: hexutil.Encode(fillByteSlice(48, 0xbb)),
|
||||
WithdrawalCredentials: hexutil.Encode(fillByteSlice(32, 0xaa)),
|
||||
Amount: "123",
|
||||
Signature: hexutil.Encode(fillByteSlice(96, 0xdd)),
|
||||
Index: "456",
|
||||
}
|
||||
|
||||
result, err := req.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, result.Amount, uint64(123))
|
||||
require.DeepEqual(t, result.Signature, fillByteSlice(96, 0xdd))
|
||||
}
|
||||
|
||||
func TestExecutionRequestsFromConsensus_HappyPath(t *testing.T) {
|
||||
er := &enginev1.ExecutionRequests{
|
||||
Deposits: []*enginev1.DepositRequest{
|
||||
{
|
||||
Pubkey: fillByteSlice(48, 0xba),
|
||||
WithdrawalCredentials: fillByteSlice(32, 0xaa),
|
||||
Amount: 33,
|
||||
Signature: fillByteSlice(96, 0xff),
|
||||
Index: 44,
|
||||
},
|
||||
},
|
||||
Withdrawals: []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: fillByteSlice(20, 0xaa),
|
||||
ValidatorPubkey: fillByteSlice(48, 0xba),
|
||||
Amount: 555,
|
||||
},
|
||||
},
|
||||
Consolidations: []*enginev1.ConsolidationRequest{
|
||||
{
|
||||
SourceAddress: fillByteSlice(20, 0xdd),
|
||||
SourcePubkey: fillByteSlice(48, 0xdd),
|
||||
TargetPubkey: fillByteSlice(48, 0xcc),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := ExecutionRequestsFromConsensus(er)
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, 1, len(result.Deposits))
|
||||
require.Equal(t, "33", result.Deposits[0].Amount)
|
||||
require.Equal(t, 1, len(result.Withdrawals))
|
||||
require.Equal(t, "555", result.Withdrawals[0].Amount)
|
||||
require.Equal(t, 1, len(result.Consolidations))
|
||||
require.Equal(t, "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", result.Consolidations[0].TargetPubkey)
|
||||
}
|
||||
|
||||
func TestExecutionRequests_ToConsensus_HappyPath(t *testing.T) {
|
||||
execReq := &ExecutionRequests{
|
||||
Deposits: []*DepositRequest{
|
||||
{
|
||||
Pubkey: hexutil.Encode(fillByteSlice(48, 0xbb)),
|
||||
WithdrawalCredentials: hexutil.Encode(fillByteSlice(32, 0xaa)),
|
||||
Amount: "33",
|
||||
Signature: hexutil.Encode(fillByteSlice(96, 0xff)),
|
||||
Index: "44",
|
||||
},
|
||||
},
|
||||
Withdrawals: []*WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: hexutil.Encode(fillByteSlice(20, 0xdd)),
|
||||
ValidatorPubkey: hexutil.Encode(fillByteSlice(48, 0xbb)),
|
||||
Amount: "555",
|
||||
},
|
||||
},
|
||||
Consolidations: []*ConsolidationRequest{
|
||||
{
|
||||
SourceAddress: hexutil.Encode(fillByteSlice(20, 0xcc)),
|
||||
SourcePubkey: hexutil.Encode(fillByteSlice(48, 0xbb)),
|
||||
TargetPubkey: hexutil.Encode(fillByteSlice(48, 0xcc)),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result, err := execReq.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(result.Deposits))
|
||||
require.Equal(t, uint64(33), result.Deposits[0].Amount)
|
||||
require.Equal(t, 1, len(result.Withdrawals))
|
||||
require.Equal(t, uint64(555), result.Withdrawals[0].Amount)
|
||||
require.Equal(t, 1, len(result.Consolidations))
|
||||
require.DeepEqual(t, fillByteSlice(48, 0xcc), result.Consolidations[0].TargetPubkey)
|
||||
}
|
||||
@@ -54,4 +54,5 @@ type ForkChoiceNodeExtraData struct {
|
||||
Balance string `json:"balance"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
TimeStamp string `json:"timestamp"`
|
||||
+ Target string `json:"target"`
|
||||
}
|
||||
|
||||
@@ -244,26 +244,6 @@ type Withdrawal struct {
|
||||
Amount string `json:"amount"`
|
||||
}
|
||||
|
||||
- type DepositRequest struct {
- Pubkey string `json:"pubkey"`
- WithdrawalCredentials string `json:"withdrawal_credentials"`
- Amount string `json:"amount"`
- Signature string `json:"signature"`
- Index string `json:"index"`
- }
-
- type WithdrawalRequest struct {
- SourceAddress string `json:"source_address"`
- ValidatorPubkey string `json:"validator_pubkey"`
- Amount string `json:"amount"`
- }
-
- type ConsolidationRequest struct {
- SourceAddress string `json:"source_address"`
- SourcePubkey string `json:"source_pubkey"`
- TargetPubkey string `json:"target_pubkey"`
- }
|
||||
|
||||
type PendingDeposit struct {
|
||||
Pubkey string `json:"pubkey"`
|
||||
WithdrawalCredentials string `json:"withdrawal_credentials"`
|
||||
|
||||
@@ -125,7 +125,7 @@ func getChan(key string) chan byte {
|
||||
|
||||
// Return a new string with unique elements.
func unique(arr []string) []string {
- if arr == nil || len(arr) <= 1 {
+ if len(arr) <= 1 {
return arr
}
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ go_library(
|
||||
"receive_blob.go",
|
||||
"receive_block.go",
|
||||
"service.go",
|
||||
"setup_forchoice.go",
|
||||
"tracked_proposer.go",
|
||||
"weak_subjectivity_checks.go",
|
||||
],
|
||||
@@ -96,6 +97,7 @@ go_library(
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
@@ -126,6 +128,7 @@ go_test(
|
||||
"receive_block_test.go",
|
||||
"service_norace_test.go",
|
||||
"service_test.go",
|
||||
"setup_forkchoice_test.go",
|
||||
"setup_test.go",
|
||||
"weak_subjectivity_checks_test.go",
|
||||
],
|
||||
@@ -154,6 +157,7 @@ go_test(
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/attestations/kv:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
@@ -185,6 +189,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
|
||||
@@ -582,6 +582,7 @@ func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, context.Background(), beaconDB, b)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, [32]byte{}))
|
||||
_, err = c.IsOptimisticForRoot(ctx, br)
|
||||
assert.NoError(t, err)
|
||||
summ, err := beaconDB.StateSummary(ctx, br)
|
||||
|
||||
@@ -72,7 +72,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
|
||||
if arg.attributes == nil {
|
||||
arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
|
||||
}
|
||||
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), arg)
|
||||
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
|
||||
if err != nil {
|
||||
switch {
|
||||
@@ -159,6 +158,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(arg.headRoot[:])),
|
||||
"headSlot": headBlk.Slot(),
|
||||
"nextSlot": nextSlot,
|
||||
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
|
||||
}).Info("Forkchoice updated with payload attributes for proposal")
|
||||
s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
|
||||
@@ -166,40 +166,19 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
|
||||
"slot": headBlk.Slot(),
|
||||
"nextSlot": nextSlot,
|
||||
}).Error("Received nil payload ID on VALID engine response")
|
||||
}
|
||||
return payloadID, nil
|
||||
}
|
||||
|
||||
- func firePayloadAttributesEvent(ctx context.Context, f event.SubscriberSender, cfg *fcuConfig) {
- pidx, err := helpers.BeaconProposerIndex(ctx, cfg.headState)
- if err != nil {
- log.WithError(err).
- WithField("head_root", cfg.headRoot[:]).
- Error("Could not get proposer index for PayloadAttributes event")
- return
- }
- evd := payloadattribute.EventData{
- ProposerIndex: pidx,
- ProposalSlot: cfg.headState.Slot(),
- ParentBlockRoot: cfg.headRoot[:],
- Attributer: cfg.attributes,
- HeadRoot: cfg.headRoot,
- HeadState: cfg.headState,
- HeadBlock: cfg.headBlock,
- }
- if cfg.headBlock != nil && !cfg.headBlock.IsNil() {
- headPayload, err := cfg.headBlock.Block().Body().Execution()
- if err != nil {
- log.WithError(err).Error("Could not get execution payload for head block")
- return
- }
- evd.ParentBlockHash = headPayload.BlockHash()
- evd.ParentBlockNumber = headPayload.BlockNumber()
- }
+ func firePayloadAttributesEvent(_ context.Context, f event.SubscriberSender, nextSlot primitives.Slot) {
+ // the fcu args have differing amounts of completeness based on the code path,
+ // and there is work we only want to do if a client is actually listening to the events beacon api endpoint.
+ // temporary solution: just fire a blank event and fill in the details in the api handler.
f.Send(&feed.Event{
Type: statefeed.PayloadAttributes,
- Data: evd,
+ Data: payloadattribute.EventData{ProposalSlot: nextSlot},
})
}
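// Illustrative consumer sketch (not part of this diff): how a listener on the
// state feed might pick up the blank PayloadAttributes event fired above. The
// Subscribe call signature is assumed from the usual Prysm statefeed pattern.
//
//	ch := make(chan *feed.Event, 1)
//	sub := stateNotifier.StateFeed().Subscribe(ch)
//	defer sub.Unsubscribe()
//	for ev := range ch {
//		if ev.Type != statefeed.PayloadAttributes {
//			continue
//		}
//		if data, ok := ev.Data.(payloadattribute.EventData); ok {
//			_ = data.ProposalSlot // only the slot is pre-filled; the API handler fills the rest
//		}
//	}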
|
||||
|
||||
|
||||
@@ -102,8 +102,10 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
|
||||
log.WithError(err).Error("could not save head")
|
||||
}
|
||||
|
||||
+ go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), s.CurrentSlot()+1)
+
// Only need to prune attestations from pool if the head has changed.
- if err := s.pruneAttsFromPool(args.headBlock); err != nil {
+ if err := s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock); err != nil {
|
||||
log.WithError(err).Error("could not prune attestations from pool")
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -3,6 +3,7 @@ package blockchain
|
||||
import (
|
||||
"testing"
|
||||
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
testDB "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
@@ -18,6 +19,7 @@ func testServiceOptsWithDB(t *testing.T) []Option {
|
||||
WithStateGen(stategen.New(beaconDB, fcs)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithClockSynchronizer(cs),
|
||||
WithStateNotifier(&mock.MockStateNotifier{RecordEvents: true}),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -213,3 +213,10 @@ func WithSyncChecker(checker Checker) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithSlasherEnabled(enabled bool) Option {
|
||||
return func(s *Service) error {
|
||||
s.slasherEnabled = enabled
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,7 +80,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
|
||||
}
|
||||
|
||||
// Use the target state to verify attesting indices are valid.
|
||||
- committees, err := helpers.AttestationCommittees(ctx, baseState, a)
+ committees, err := helpers.AttestationCommitteesFromState(ctx, baseState, a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
@@ -368,7 +370,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot,
|
||||
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
|
||||
// Feed in block's attestations to fork choice store.
|
||||
for _, a := range blk.Body().Attestations() {
|
||||
- committees, err := helpers.AttestationCommittees(ctx, st, a)
+ committees, err := helpers.AttestationCommitteesFromState(ctx, st, a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -419,27 +421,102 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
|
||||
return nil
|
||||
}
|
||||
|
||||
- // This removes the attestations in block `b` from the attestation mem pool.
- func (s *Service) pruneAttsFromPool(headBlock interfaces.ReadOnlySignedBeaconBlock) error {
- atts := headBlock.Block().Body().Attestations()
- for _, att := range atts {
- if features.Get().EnableExperimentalAttestationPool {
- if err := s.cfg.AttestationCache.DeleteCovered(att); err != nil {
- return errors.Wrap(err, "could not delete attestation")
- }
- } else if att.IsAggregated() {
- if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
- return err
- }
- } else {
- if err := s.cfg.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
- return err
- }
+ // pruneAttsFromPool removes these attestations from the attestation pool
+ // which are covered by attestations from the received block.
+ func (s *Service) pruneAttsFromPool(ctx context.Context, headState state.BeaconState, headBlock interfaces.ReadOnlySignedBeaconBlock) error {
+ for _, att := range headBlock.Block().Body().Attestations() {
+ if err := s.pruneCoveredAttsFromPool(ctx, headState, att); err != nil {
+ log.WithError(err).Warn("Could not prune attestations covered by a received block's attestation")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) pruneCoveredAttsFromPool(ctx context.Context, headState state.BeaconState, att ethpb.Att) error {
|
||||
switch {
|
||||
case !att.IsAggregated():
|
||||
return s.cfg.AttPool.DeleteUnaggregatedAttestation(att)
|
||||
case att.Version() == version.Phase0:
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
return errors.Wrap(s.cfg.AttestationCache.DeleteCovered(att), "could not delete covered attestation")
|
||||
}
|
||||
return errors.Wrap(s.cfg.AttPool.DeleteAggregatedAttestation(att), "could not delete aggregated attestation")
|
||||
default:
|
||||
return s.pruneCoveredElectraAttsFromPool(ctx, headState, att)
|
||||
}
|
||||
}
|
||||
|
||||
// pruneCoveredElectraAttsFromPool handles removing aggregated Electra attestations from the pool after receiving a block.
|
||||
// Because in Electra block attestations can combine aggregates for multiple committees, comparing attestation bits
|
||||
// of a block attestation with attestations bits of an aggregate can cause unexpected results, leading to covered
|
||||
// aggregates not being removed from the pool.
|
||||
//
|
||||
// To make sure aggregates are removed, we decompose the block attestation into dummy aggregates, with each
|
||||
// aggregate accounting for one committee. This allows us to compare aggregates in the same way it's done for
|
||||
// Phase0. Even though we can't provide a valid signature for the dummy aggregate, it does not matter because
|
||||
// signatures play no part in pruning attestations.
|
||||
func (s *Service) pruneCoveredElectraAttsFromPool(ctx context.Context, headState state.BeaconState, att ethpb.Att) error {
|
||||
if att.Version() == version.Phase0 {
|
||||
log.Error("Called pruneCoveredElectraAttsFromPool with a Phase0 attestation")
|
||||
return nil
|
||||
}
|
||||
|
||||
// We don't want to recompute committees. If they are not cached already,
|
||||
// we allow attestations to stay in the pool. If these attestations are
|
||||
// included in a later block, they will be redundant. But given that
|
||||
// they were not cached in the first place, it's unlikely that they
|
||||
// will be chosen into a block.
|
||||
ok, committees, err := helpers.AttestationCommitteesFromCache(ctx, headState, att)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get attestation committees")
|
||||
}
|
||||
if !ok {
|
||||
log.Debug("Attestation committees are not cached. Skipping attestation pruning.")
|
||||
return nil
|
||||
}
|
||||
|
||||
committeeIndices := att.CommitteeBitsVal().BitIndices()
|
||||
offset := uint64(0)
|
||||
|
||||
// Sanity check as this should never happen
|
||||
if len(committeeIndices) != len(committees) {
|
||||
return errors.New("committee indices and committees have different lengths")
|
||||
}
|
||||
|
||||
for i, c := range committees {
|
||||
ab := bitfield.NewBitlist(uint64(len(c)))
|
||||
for j := uint64(0); j < uint64(len(c)); j++ {
|
||||
ab.SetBitAt(j, att.GetAggregationBits().BitAt(j+offset))
|
||||
}
|
||||
|
||||
cb := primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(uint64(committeeIndices[i]), true)
|
||||
|
||||
a := ðpb.AttestationElectra{
|
||||
AggregationBits: ab,
|
||||
Data: att.GetData(),
|
||||
CommitteeBits: cb,
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
}
|
||||
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = s.cfg.AttestationCache.DeleteCovered(a); err != nil {
|
||||
return errors.Wrap(err, "could not delete covered attestation")
|
||||
}
|
||||
} else if !a.IsAggregated() {
|
||||
if err = s.cfg.AttPool.DeleteUnaggregatedAttestation(a); err != nil {
|
||||
return errors.Wrap(err, "could not delete unaggregated attestation")
|
||||
}
|
||||
} else if err = s.cfg.AttPool.DeleteAggregatedAttestation(a); err != nil {
|
||||
return errors.Wrap(err, "could not delete aggregated attestation")
|
||||
}
|
||||
|
||||
offset += uint64(len(c))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
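// Standalone sketch (not part of this diff) of the decomposition performed above:
// slicing an Electra attestation's combined aggregation bits into one bitlist per
// committee. The helper name and signature are illustrative only.
func splitAggregationBits(att ethpb.Att, committeeSizes []uint64) []bitfield.Bitlist {
	out := make([]bitfield.Bitlist, 0, len(committeeSizes))
	offset := uint64(0)
	for _, size := range committeeSizes {
		b := bitfield.NewBitlist(size)
		for j := uint64(0); j < size; j++ {
			// Bit j of this committee lives at position offset+j in the combined bitlist.
			b.SetBitAt(j, att.GetAggregationBits().BitAt(j+offset))
		}
		out = append(out, b)
		offset += size
	}
	return out
}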
|
||||
|
||||
// validateMergeTransitionBlock validates the merge transition block.
|
||||
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
// Skip validation if block is older than Bellatrix.
|
||||
@@ -650,13 +727,9 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
|
||||
// return early if we are not proposing next slot
|
||||
if attribute.IsEmpty() {
|
||||
fcuArgs := &fcuConfig{
|
||||
headState: headState,
|
||||
headRoot: headRoot,
|
||||
headBlock: nil,
|
||||
attributes: attribute,
|
||||
}
|
||||
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), fcuArgs)
|
||||
// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
|
||||
// call notifyForkchoiceUpdate, so the event is fired here.
|
||||
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), s.CurrentSlot()+1)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -12,8 +12,10 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
lightClient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
@@ -25,6 +27,7 @@ import (
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/kv"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
@@ -45,6 +48,95 @@ import (
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func Test_pruneAttsFromPool_Electra(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
logHook := logTest.NewGlobal()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.TargetCommitteeSize = 8
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
s := Service{
|
||||
cfg: &config{
|
||||
AttPool: kv.NewAttCaches(),
|
||||
},
|
||||
}
|
||||
|
||||
data := ðpb.AttestationData{
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
}
|
||||
|
||||
cb := primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(0, true)
|
||||
att1 := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b10000000, 0b00000001},
|
||||
Data: data,
|
||||
Signature: make([]byte, 96),
|
||||
CommitteeBits: cb,
|
||||
}
|
||||
|
||||
cb = primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(1, true)
|
||||
att2 := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b11110111, 0b00000001},
|
||||
Data: data,
|
||||
Signature: make([]byte, 96),
|
||||
CommitteeBits: cb,
|
||||
}
|
||||
|
||||
cb = primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(3, true)
|
||||
att3 := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b11110111, 0b00000001},
|
||||
Data: data,
|
||||
Signature: make([]byte, 96),
|
||||
CommitteeBits: cb,
|
||||
}
|
||||
|
||||
require.NoError(t, s.cfg.AttPool.SaveUnaggregatedAttestation(att1))
|
||||
require.NoError(t, s.cfg.AttPool.SaveAggregatedAttestation(att2))
|
||||
require.NoError(t, s.cfg.AttPool.SaveAggregatedAttestation(att3))
|
||||
|
||||
cb = primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(0, true)
|
||||
cb.SetBitAt(1, true)
|
||||
onChainAtt := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b10000000, 0b11110111, 0b00000001},
|
||||
Data: data,
|
||||
Signature: make([]byte, 96),
|
||||
CommitteeBits: cb,
|
||||
}
|
||||
bl := ðpb.SignedBeaconBlockElectra{
|
||||
Block: ðpb.BeaconBlockElectra{
|
||||
Body: ðpb.BeaconBlockBodyElectra{
|
||||
Attestations: []*ethpb.AttestationElectra{onChainAtt},
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
rob, err := consensusblocks.NewSignedBeaconBlock(bl)
|
||||
require.NoError(t, err)
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1024)
|
||||
committees, err := helpers.BeaconCommittees(ctx, st, 0)
|
||||
require.NoError(t, err)
|
||||
// Sanity check to make sure the on-chain att will be decomposed
|
||||
// into the correct number of aggregates.
|
||||
require.Equal(t, 4, len(committees))
|
||||
|
||||
require.NoError(t, s.pruneAttsFromPool(ctx, st, rob))
|
||||
require.LogsDoNotContain(t, logHook, "Could not prune attestations")
|
||||
|
||||
attsInPool, err := s.cfg.AttPool.UnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(attsInPool))
|
||||
attsInPool = s.cfg.AttPool.AggregatedAttestations()
|
||||
require.Equal(t, 1, len(attsInPool))
|
||||
assert.DeepEqual(t, att3, attsInPool[0])
|
||||
}
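The on-chain Electra attestation above carries two committees at once: its committee bits select committees 0 and 1, and its aggregation bits are the per-committee bits laid out back to back. A minimal sketch of splitting such a concatenated bitlist back into per-committee bitlists (the helper is hypothetical, for illustration only, and is not the pruning code under test):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

// splitAggregationBits slices an Electra aggregate's concatenated aggregation
// bits into one bitlist per selected committee, given the committee sizes.
func splitAggregationBits(agg bitfield.Bitlist, committeeSizes []uint64) []bitfield.Bitlist {
	out := make([]bitfield.Bitlist, len(committeeSizes))
	offset := uint64(0)
	for i, size := range committeeSizes {
		bl := bitfield.NewBitlist(size)
		for j := uint64(0); j < size; j++ {
			bl.SetBitAt(j, agg.BitAt(offset+j))
		}
		out[i] = bl
		offset += size
	}
	return out
}

func main() {
	// Two committees of 8 validators each, mirroring TargetCommitteeSize = 8 above.
	agg := bitfield.NewBitlist(16)
	agg.SetBitAt(3, true)  // a voter in committee 0
	agg.SetBitAt(10, true) // a voter in committee 1
	parts := splitAggregationBits(agg, []uint64{8, 8})
	fmt.Println(parts[0].Count(), parts[1].Count()) // prints: 1 1
}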
|
||||
|
||||
func TestStore_OnBlockBatch(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
@@ -821,6 +913,8 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoveBlockAttestationsInPool(t *testing.T) {
|
||||
logHook := logTest.NewGlobal()
|
||||
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
@@ -840,7 +934,8 @@ func TestRemoveBlockAttestationsInPool(t *testing.T) {
|
||||
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.pruneAttsFromPool(wsb))
|
||||
require.NoError(t, service.pruneAttsFromPool(context.Background(), nil /* state not needed pre-Electra */, wsb))
|
||||
require.LogsDoNotContain(t, logHook, "Could not prune attestations")
|
||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
}
|
||||
|
||||
@@ -1896,6 +1991,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, genesisRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
@@ -2030,6 +2126,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, jroot))
|
||||
service.cfg.ForkChoiceStore.SetBalancesByRooter(service.cfg.StateGen.ActiveNonSlashedBalancesByRoot)
|
||||
require.NoError(t, service.StartFromSavedState(genesisState))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
|
||||
// Forkchoice has the genesisRoot loaded at startup
|
||||
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -121,7 +120,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
return err
|
||||
}
|
||||
// If slasher is configured, forward the attestations in the block via an event feed for processing.
|
||||
if features.Get().EnableSlasher {
|
||||
if s.slasherEnabled {
|
||||
go s.sendBlockAttestationsToSlasher(blockCopy, preState)
|
||||
}
|
||||
|
||||
@@ -548,7 +547,7 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
|
||||
// is done in the background to avoid adding more load to this critical code path.
|
||||
ctx := context.TODO()
|
||||
for _, att := range signed.Block().Body().Attestations() {
|
||||
committees, err := helpers.AttestationCommittees(ctx, preState, att)
|
||||
committees, err := helpers.AttestationCommitteesFromState(ctx, preState, att)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attestation committees")
|
||||
return
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
@@ -23,7 +22,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
|
||||
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/slashings"
|
||||
@@ -32,7 +30,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
@@ -42,6 +39,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Service represents a service that handles the internal
|
||||
@@ -65,6 +63,7 @@ type Service struct {
|
||||
blobNotifiers *blobNotifierMap
|
||||
blockBeingSynced *currentlySyncingBlock
|
||||
blobStorage *filesystem.BlobStorage
|
||||
slasherEnabled bool
|
||||
}
|
||||
|
||||
// config options for the service.
|
||||
@@ -269,69 +268,18 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
return err
|
||||
}
|
||||
s.originBlockRoot = originRoot
|
||||
|
||||
if err := s.initializeHeadFromDB(s.ctx); err != nil {
|
||||
return errors.Wrap(err, "could not set up chain info")
|
||||
st, err := s.cfg.StateGen.Resume(s.ctx, s.cfg.FinalizedStateAtStartUp)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized state from db")
|
||||
}
|
||||
spawnCountdownIfPreGenesis(s.ctx, s.genesisTime, s.cfg.BeaconDB)
|
||||
|
||||
justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get justified checkpoint")
|
||||
}
|
||||
if justified == nil {
|
||||
return errNilJustifiedCheckpoint
|
||||
}
|
||||
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint")
|
||||
}
|
||||
if finalized == nil {
|
||||
return errNilFinalizedCheckpoint
|
||||
}
|
||||
|
||||
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
|
||||
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
|
||||
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
|
||||
|
||||
st, err := s.cfg.StateGen.StateByRoot(s.ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint state")
|
||||
}
|
||||
finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint block")
|
||||
}
|
||||
roblock, err := blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
|
||||
return errors.Wrap(err, "could not insert finalized block to forkchoice")
|
||||
}
|
||||
if !features.Get().EnableStartOptimistic {
|
||||
lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get last validated checkpoint")
|
||||
}
|
||||
if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(s.ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not set finalized block as validated")
|
||||
}
|
||||
}
|
||||
if err := s.setupForkchoice(st); err != nil {
|
||||
return errors.Wrap(err, "could not set up forkchoice")
|
||||
}
|
||||
// not attempting to save initial sync blocks here, because there shouldn't be any until
|
||||
// after the statefeed.Initialized event is fired (below)
|
||||
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, finalized.Epoch); err != nil {
|
||||
cp := s.FinalizedCheckpt()
|
||||
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, cp.Epoch); err != nil {
|
||||
// Exit run time if the node failed to verify weak subjectivity checkpoint.
|
||||
return errors.Wrap(err, "could not verify initial checkpoint provided for chain sync")
|
||||
}
|
||||
@@ -340,7 +288,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
|
||||
return errors.Wrap(err, "failed to initialize blockchain service")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -370,45 +317,36 @@ func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error
|
||||
return genesisBlkRoot, nil
|
||||
}
|
||||
|
||||
// initializeHeadFromDB uses the finalized checkpoint and head block found in the database to set the current head.
|
||||
// initializeHead uses the finalized checkpoint and head block root from forkchoice to set the current head.
|
||||
// Note that this may block until stategen replays blocks between the finalized and head blocks
|
||||
// if the head sync flag was specified and the gap between the finalized and head blocks is at least 128 epochs long.
|
||||
func (s *Service) initializeHeadFromDB(ctx context.Context) error {
|
||||
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint from db")
|
||||
}
|
||||
if finalized == nil {
|
||||
// This should never happen. At chain start, the finalized checkpoint
|
||||
// would be the genesis state and block.
|
||||
return errors.New("no finalized epoch in the database")
|
||||
}
|
||||
finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
|
||||
var finalizedState state.BeaconState
|
||||
|
||||
finalizedState, err = s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized state from db")
|
||||
}
|
||||
|
||||
if finalizedState == nil || finalizedState.IsNil() {
|
||||
func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) error {
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
if st == nil || st.IsNil() {
|
||||
return errors.New("finalized state can't be nil")
|
||||
}
|
||||
|
||||
finalizedBlock, err := s.getBlock(ctx, finalizedRoot)
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
root := s.cfg.ForkChoiceStore.HighestReceivedBlockRoot()
|
||||
s.cfg.ForkChoiceStore.RUnlock()
|
||||
blk, err := s.cfg.BeaconDB.Block(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block")
|
||||
return errors.Wrap(err, "could not get head block")
|
||||
}
|
||||
if err := s.setHead(&head{
|
||||
finalizedRoot,
|
||||
finalizedBlock,
|
||||
finalizedState,
|
||||
finalizedBlock.Block().Slot(),
|
||||
false,
|
||||
}); err != nil {
|
||||
if root != fRoot {
|
||||
st, err = s.cfg.StateGen.StateByRoot(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
}
|
||||
if err := s.setHead(&head{root, blk, st, blk.Block().Slot(), false}); err != nil {
|
||||
return errors.Wrap(err, "could not set head")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": blk.Block().Slot(),
|
||||
}).Info("Initialized head block from DB")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
beacon-chain/blockchain/setup_forchoice.go (new file, 175 lines)
@@ -0,0 +1,175 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func (s *Service) setupForkchoice(st state.BeaconState) error {
|
||||
if err := s.setupForkchoiceCheckpoints(); err != nil {
|
||||
return errors.Wrap(err, "could not set up forkchoice checkpoints")
|
||||
}
|
||||
if err := s.setupForkchoiceTree(st); err != nil {
|
||||
return errors.Wrap(err, "could not set up forkchoice root")
|
||||
}
|
||||
if err := s.initializeHead(s.ctx, st); err != nil {
|
||||
return errors.Wrap(err, "could not initialize head from db")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) startupHeadRoot() [32]byte {
|
||||
headStr := features.Get().ForceHead
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
if headStr == "" {
|
||||
return fRoot
|
||||
}
|
||||
if headStr == "head" {
|
||||
root, err := s.cfg.BeaconDB.HeadBlockRoot()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not get head block root, starting with finalized block as head")
|
||||
return fRoot
|
||||
}
|
||||
log.Infof("Using Head root of %#x", root)
|
||||
return root
|
||||
}
|
||||
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not parse head root, starting with finalized block as head")
|
||||
return fRoot
|
||||
}
|
||||
return [32]byte(root)
|
||||
}
|
||||
|
||||
func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
|
||||
headRoot := s.startupHeadRoot()
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
if err := s.setupForkchoiceRoot(st); err != nil {
|
||||
return errors.Wrap(err, "could not set up forkchoice root")
|
||||
}
|
||||
if headRoot == fRoot {
|
||||
return nil
|
||||
}
|
||||
blk, err := s.cfg.BeaconDB.Block(s.ctx, headRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not get head block, starting with finalized block as head")
|
||||
return nil
|
||||
}
|
||||
if slots.ToEpoch(blk.Block().Slot()) < cp.Epoch {
|
||||
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("head block is older than finalized block, starting with finalized block as head")
|
||||
return nil
|
||||
}
|
||||
chain, err := s.buildForkchoiceChain(s.ctx, blk)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not build forkchoice chain, starting with finalized block as head")
|
||||
return nil
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
return s.cfg.ForkChoiceStore.InsertChain(s.ctx, chain)
|
||||
}
|
||||
|
||||
func (s *Service) buildForkchoiceChain(ctx context.Context, head interfaces.ReadOnlySignedBeaconBlock) ([]*forkchoicetypes.BlockAndCheckpoints, error) {
|
||||
chain := []*forkchoicetypes.BlockAndCheckpoints{}
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
jp := s.CurrentJustifiedCheckpt()
|
||||
root, err := head.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get head block root")
|
||||
}
|
||||
for {
|
||||
roblock, err := blocks.NewROBlockWithRoot(head, root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// This chain sets the justified checkpoint for every block, including some that are older than jp.
|
||||
// This should, however, be safe for forkchoice at startup. An alternative would be to hook during the
|
||||
// block processing pipeline when setting the head state, to compute the right states for the justified
|
||||
// checkpoint.
|
||||
chain = append(chain, &forkchoicetypes.BlockAndCheckpoints{Block: roblock, JustifiedCheckpoint: jp, FinalizedCheckpoint: cp})
|
||||
root = head.Block().ParentRoot()
|
||||
if root == fRoot {
|
||||
break
|
||||
}
|
||||
head, err = s.cfg.BeaconDB.Block(s.ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get block")
|
||||
}
|
||||
if slots.ToEpoch(head.Block().Slot()) < cp.Epoch {
|
||||
return nil, errors.New("head block is not a descendant of the finalized checkpoint")
|
||||
}
|
||||
}
|
||||
return chain, nil
|
||||
}
|
||||
|
||||
func (s *Service) setupForkchoiceRoot(st state.BeaconState) error {
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint block")
|
||||
}
|
||||
roblock, err := blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
|
||||
return errors.Wrap(err, "could not insert finalized block to forkchoice")
|
||||
}
|
||||
if !features.Get().EnableStartOptimistic {
|
||||
lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get last validated checkpoint")
|
||||
}
|
||||
if bytes.Equal(fRoot[:], lastValidatedCheckpoint.Root) {
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(s.ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not set finalized block as validated")
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) setupForkchoiceCheckpoints() error {
|
||||
justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get justified checkpoint")
|
||||
}
|
||||
if justified == nil {
|
||||
return errNilJustifiedCheckpoint
|
||||
}
|
||||
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint")
|
||||
}
|
||||
if finalized == nil {
|
||||
return errNilFinalizedCheckpoint
|
||||
}
|
||||
|
||||
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
|
||||
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
|
||||
Root: fRoot}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
|
||||
return nil
|
||||
}
|
||||
beacon-chain/blockchain/setup_forkchoice_test.go (new file, 128 lines)
@@ -0,0 +1,128 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func Test_startupHeadRoot(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
hook := logTest.NewGlobal()
|
||||
cp := service.FinalizedCheckpt()
|
||||
require.DeepEqual(t, cp.Root, params.BeaconConfig().ZeroHash[:])
|
||||
gr := [32]byte{'r', 'o', 'o', 't'}
|
||||
service.originBlockRoot = gr
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, gr))
|
||||
t.Run("start from finalized", func(t *testing.T) {
|
||||
require.Equal(t, service.startupHeadRoot(), gr)
|
||||
})
|
||||
t.Run("head requested, error path", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
ForceHead: "head",
|
||||
})
|
||||
defer resetCfg()
|
||||
require.Equal(t, service.startupHeadRoot(), gr)
|
||||
require.LogsContain(t, hook, "could not get head block root, starting with finalized block as head")
|
||||
})
|
||||
|
||||
st, _ := util.DeterministicGenesisState(t, 64)
|
||||
hr := [32]byte{'h', 'e', 'a', 'd'}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, hr), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, hr), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, hr))
|
||||
|
||||
t.Run("start from head", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
ForceHead: "head",
|
||||
})
|
||||
defer resetCfg()
|
||||
require.Equal(t, service.startupHeadRoot(), hr)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_setupForkchoiceTree_Finalized(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
st, _ := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, ðpb.Checkpoint{Root: parentRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: parentRoot[:]}))
|
||||
require.NoError(t, service.setupForkchoiceTree(st))
|
||||
require.Equal(t, 1, service.cfg.ForkChoiceStore.NodeCount())
|
||||
}
|
||||
|
||||
func Test_setupForkchoiceTree_Head(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
ForceHead: "head",
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
genesisState, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := genesisState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, genesisRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(1))
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
|
||||
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), primitives.Slot(2))
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, preState))
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, root))
|
||||
cp := service.FinalizedCheckpt()
|
||||
fRoot := service.ensureRootNotZeros([32]byte(cp.Root))
|
||||
require.NotEqual(t, fRoot, root)
|
||||
require.Equal(t, root, service.startupHeadRoot())
|
||||
require.NoError(t, service.setupForkchoiceTree(st))
|
||||
require.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount())
|
||||
}
|
||||
@@ -66,7 +66,7 @@ func ProcessAttestationNoVerifySignature(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
committees, err := helpers.AttestationCommittees(ctx, beaconState, att)
|
||||
committees, err := helpers.AttestationCommitteesFromState(ctx, beaconState, att)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -192,7 +192,7 @@ func createAttestationSignatureBatch(
|
||||
descs := make([]string, len(atts))
|
||||
for i, a := range atts {
|
||||
sigs[i] = a.GetSignature()
|
||||
committees, err := helpers.AttestationCommittees(ctx, beaconState, a)
|
||||
committees, err := helpers.AttestationCommitteesFromState(ctx, beaconState, a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -30,6 +30,13 @@ var (
|
||||
proposerIndicesCache = cache.NewProposerIndicesCache()
|
||||
)
|
||||
|
||||
type beaconCommitteeFunc = func(
|
||||
ctx context.Context,
|
||||
state state.ReadOnlyBeaconState,
|
||||
slot primitives.Slot,
|
||||
committeeIndex primitives.CommitteeIndex,
|
||||
) ([]primitives.ValidatorIndex, error)
|
||||
|
||||
// SlotCommitteeCount returns the number of beacon committees of a slot. The
|
||||
// active validator count is provided as an argument rather than an imported implementation
|
||||
// from the spec definition. Having the active validator count as an argument allows for
|
||||
@@ -59,21 +66,48 @@ func SlotCommitteeCount(activeValidatorCount uint64) uint64 {
|
||||
return committeesPerSlot
|
||||
}
|
||||
|
||||
// AttestationCommittees returns beacon state committees that reflect attestation's committee indices.
|
||||
func AttestationCommittees(ctx context.Context, st state.ReadOnlyBeaconState, att ethpb.Att) ([][]primitives.ValidatorIndex, error) {
|
||||
// AttestationCommitteesFromState returns beacon state committees that reflect attestation's committee indices.
|
||||
func AttestationCommitteesFromState(ctx context.Context, st state.ReadOnlyBeaconState, att ethpb.Att) ([][]primitives.ValidatorIndex, error) {
|
||||
return attestationCommittees(ctx, st, att, BeaconCommitteeFromState)
|
||||
}
|
||||
|
||||
// AttestationCommitteesFromCache has the same functionality as AttestationCommitteesFromState, but only returns a value
|
||||
// when all attestation committees are already cached.
|
||||
func AttestationCommitteesFromCache(ctx context.Context, st state.ReadOnlyBeaconState, att ethpb.Att) (bool, [][]primitives.ValidatorIndex, error) {
|
||||
committees, err := attestationCommittees(ctx, st, att, BeaconCommitteeFromCache)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
if len(committees) == 0 {
|
||||
return false, nil, nil
|
||||
}
|
||||
for _, c := range committees {
|
||||
if len(c) == 0 {
|
||||
return false, nil, nil
|
||||
}
|
||||
}
|
||||
return true, committees, nil
|
||||
}
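A plausible caller-side pattern for the new pair of helpers, assuming the signatures shown in this hunk (the wrapper name is made up): try the cache-only variant first and fall back to recomputing from state only when some committee is missing.

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// committeesForAtt serves attestation committees from the cache when every
// selected committee is present, and otherwise recomputes them from the state.
func committeesForAtt(ctx context.Context, st state.ReadOnlyBeaconState, att ethpb.Att) ([][]primitives.ValidatorIndex, error) {
	ok, committees, err := helpers.AttestationCommitteesFromCache(ctx, st, att)
	if err != nil {
		return nil, err
	}
	if ok {
		return committees, nil // all committees were already cached
	}
	return helpers.AttestationCommitteesFromState(ctx, st, att)
}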
|
||||
|
||||
func attestationCommittees(
|
||||
ctx context.Context,
|
||||
st state.ReadOnlyBeaconState,
|
||||
att ethpb.Att,
|
||||
committeeFunc beaconCommitteeFunc,
|
||||
) ([][]primitives.ValidatorIndex, error) {
|
||||
var committees [][]primitives.ValidatorIndex
|
||||
if att.Version() >= version.Electra {
|
||||
committeeIndices := att.CommitteeBitsVal().BitIndices()
|
||||
committees = make([][]primitives.ValidatorIndex, len(committeeIndices))
|
||||
for i, ci := range committeeIndices {
|
||||
committee, err := BeaconCommitteeFromState(ctx, st, att.GetData().Slot, primitives.CommitteeIndex(ci))
|
||||
committee, err := committeeFunc(ctx, st, att.GetData().Slot, primitives.CommitteeIndex(ci))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
committees[i] = committee
|
||||
}
|
||||
} else {
|
||||
committee, err := BeaconCommitteeFromState(ctx, st, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
committee, err := committeeFunc(ctx, st, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -164,6 +198,27 @@ func BeaconCommitteeFromState(ctx context.Context, state state.ReadOnlyBeaconSta
|
||||
return BeaconCommittee(ctx, activeIndices, seed, slot, committeeIndex)
|
||||
}
|
||||
|
||||
// BeaconCommitteeFromCache has the same functionality as BeaconCommitteeFromState, but only returns a value
|
||||
// when the committee is already cached.
|
||||
func BeaconCommitteeFromCache(
|
||||
ctx context.Context,
|
||||
state state.ReadOnlyBeaconState,
|
||||
slot primitives.Slot,
|
||||
committeeIndex primitives.CommitteeIndex,
|
||||
) ([]primitives.ValidatorIndex, error) {
|
||||
epoch := slots.ToEpoch(slot)
|
||||
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get seed")
|
||||
}
|
||||
|
||||
committee, err := committeeCache.Committee(ctx, slot, seed, committeeIndex)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not interface with committee cache")
|
||||
}
|
||||
return committee, nil
|
||||
}
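The cache-only lookup returns an empty committee on a miss rather than an error, which is what the tests below exercise. A hypothetical read-through helper mirroring that contract (not part of this change):

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

// warmAndGetCommittee treats an empty result as a cache miss, fills the
// committee cache for the slot's epoch, and retries the lookup once.
func warmAndGetCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot, idx primitives.CommitteeIndex) ([]primitives.ValidatorIndex, error) {
	committee, err := helpers.BeaconCommitteeFromCache(ctx, st, slot, idx)
	if err != nil {
		return nil, err
	}
	if len(committee) > 0 {
		return committee, nil
	}
	if err := helpers.UpdateCommitteeCache(ctx, st, slots.ToEpoch(slot)); err != nil {
		return nil, err
	}
	return helpers.BeaconCommitteeFromCache(ctx, st, slot, idx)
}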
|
||||
|
||||
// BeaconCommittee returns the beacon committee of a given slot and committee index. The
|
||||
// validator indices and seed are provided as an argument rather than an imported implementation
|
||||
// from the spec definition. Having them as an argument allows for cheaper computation run time.
|
||||
|
||||
@@ -729,7 +729,9 @@ func TestCommitteeIndices(t *testing.T) {
|
||||
assert.DeepEqual(t, []primitives.CommitteeIndex{0, 1, 3}, indices)
|
||||
}
|
||||
|
||||
func TestAttestationCommittees(t *testing.T) {
|
||||
func TestAttestationCommitteesFromState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -745,7 +747,7 @@ func TestAttestationCommittees(t *testing.T) {
|
||||
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
att := ðpb.Attestation{Data: ðpb.AttestationData{CommitteeIndex: 0}}
|
||||
committees, err := helpers.AttestationCommittees(context.Background(), state, att)
|
||||
committees, err := helpers.AttestationCommitteesFromState(ctx, state, att)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(committees))
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[0])))
|
||||
@@ -755,7 +757,7 @@ func TestAttestationCommittees(t *testing.T) {
|
||||
bits.SetBitAt(0, true)
|
||||
bits.SetBitAt(1, true)
|
||||
att := ðpb.AttestationElectra{CommitteeBits: bits, Data: ðpb.AttestationData{}}
|
||||
committees, err := helpers.AttestationCommittees(context.Background(), state, att)
|
||||
committees, err := helpers.AttestationCommitteesFromState(ctx, state, att)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(committees))
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[0])))
|
||||
@@ -763,9 +765,58 @@ func TestAttestationCommittees(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconCommittees(t *testing.T) {
|
||||
prevConfig := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(prevConfig)
|
||||
func TestAttestationCommitteesFromCache(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
att := ðpb.Attestation{Data: ðpb.AttestationData{CommitteeIndex: 0}}
|
||||
ok, _, err := helpers.AttestationCommitteesFromCache(ctx, state, att)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ok)
|
||||
require.NoError(t, helpers.UpdateCommitteeCache(ctx, state, 0))
|
||||
ok, committees, err := helpers.AttestationCommitteesFromCache(ctx, state, att)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, 1, len(committees))
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[0])))
|
||||
})
|
||||
t.Run("post-Electra", func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
bits := primitives.NewAttestationCommitteeBits()
|
||||
bits.SetBitAt(0, true)
|
||||
bits.SetBitAt(1, true)
|
||||
att := ðpb.AttestationElectra{CommitteeBits: bits, Data: ðpb.AttestationData{}}
|
||||
ok, _, err := helpers.AttestationCommitteesFromCache(ctx, state, att)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ok)
|
||||
require.NoError(t, helpers.UpdateCommitteeCache(ctx, state, 0))
|
||||
ok, committees, err := helpers.AttestationCommitteesFromCache(ctx, state, att)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, 2, len(committees))
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[0])))
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[1])))
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeaconCommitteesFromState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
c := params.BeaconConfig().Copy()
|
||||
c.MinGenesisActiveValidatorCount = 128
|
||||
c.SlotsPerEpoch = 4
|
||||
@@ -774,15 +825,49 @@ func TestBeaconCommittees(t *testing.T) {
|
||||
|
||||
state, _ := util.DeterministicGenesisState(t, 256)
|
||||
|
||||
activeCount, err := helpers.ActiveValidatorCount(context.Background(), state, 0)
|
||||
activeCount, err := helpers.ActiveValidatorCount(ctx, state, 0)
|
||||
require.NoError(t, err)
|
||||
committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
|
||||
committees, err := helpers.BeaconCommittees(context.Background(), state, 0)
|
||||
committees, err := helpers.BeaconCommittees(ctx, state, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, committeesPerSlot, uint64(len(committees)))
|
||||
for idx := primitives.CommitteeIndex(0); idx < primitives.CommitteeIndex(len(committees)); idx++ {
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, 0, idx)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, 0, idx)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, committees[idx], committee)
|
||||
assert.DeepEqual(t, committees[idx], committee)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconCommitteesFromCache(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
c := params.BeaconConfig().Copy()
|
||||
c.MinGenesisActiveValidatorCount = 128
|
||||
c.SlotsPerEpoch = 4
|
||||
c.TargetCommitteeSize = 16
|
||||
params.OverrideBeaconConfig(c)
|
||||
|
||||
state, _ := util.DeterministicGenesisState(t, 256)
|
||||
|
||||
activeCount, err := helpers.ActiveValidatorCount(ctx, state, 0)
|
||||
require.NoError(t, err)
|
||||
committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
|
||||
committees, err := helpers.BeaconCommittees(ctx, state, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, committeesPerSlot, uint64(len(committees)))
|
||||
|
||||
helpers.ClearCache()
|
||||
for idx := primitives.CommitteeIndex(0); idx < primitives.CommitteeIndex(len(committees)); idx++ {
|
||||
committee, err := helpers.BeaconCommitteeFromCache(ctx, state, 0, idx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(committee))
|
||||
}
|
||||
|
||||
require.NoError(t, helpers.UpdateCommitteeCache(ctx, state, 0))
|
||||
for idx := primitives.CommitteeIndex(0); idx < primitives.CommitteeIndex(len(committees)); idx++ {
|
||||
committee, err := helpers.BeaconCommitteeFromCache(ctx, state, 0, idx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, committees[idx], committee)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -101,7 +101,7 @@ type NoHeadAccessDatabase interface {
|
||||
SaveLightClientBootstrap(ctx context.Context, blockRoot []byte, bootstrap interfaces.LightClientBootstrap) error
|
||||
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot) error
|
||||
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)
|
||||
}
|
||||
|
||||
// HeadAccessDatabase defines a struct with access to reading chain head data.
|
||||
@@ -110,6 +110,7 @@ type HeadAccessDatabase interface {
|
||||
|
||||
// Block related methods.
|
||||
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
HeadBlockRoot() ([32]byte, error)
|
||||
SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
|
||||
// Genesis operations.
|
||||
|
||||
@@ -30,22 +30,32 @@ var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")
|
||||
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
|
||||
defer span.End()
|
||||
// Return block from cache if it exists.
|
||||
blk, err := s.getBlock(ctx, blockRoot, nil)
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return blk, err
|
||||
}
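Block now maps ErrNotFound to a nil block with a nil error, so callers detect a missing root through the nil block rather than an error value. A minimal caller-side sketch inside this package (blockExists is a hypothetical helper, distinct from the store's own HasBlock):

// blockExists reports whether a block for the given root is stored, relying on
// Block returning (nil, nil) when the root is absent.
func blockExists(ctx context.Context, s *Store, root [32]byte) (bool, error) {
	blk, err := s.Block(ctx, root)
	if err != nil {
		return false, err
	}
	return blk != nil, nil
}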
|
||||
|
||||
func (s *Store) getBlock(ctx context.Context, blockRoot [32]byte, tx *bolt.Tx) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
|
||||
return v.(interfaces.ReadOnlySignedBeaconBlock), nil
|
||||
}
|
||||
var blk interfaces.ReadOnlySignedBeaconBlock
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blocksBucket)
|
||||
enc := bkt.Get(blockRoot[:])
|
||||
if enc == nil {
|
||||
return nil
|
||||
}
|
||||
// This method allows the caller to pass in its tx if one is already open.
|
||||
// Or if a nil value is used, a transaction will be managed internally.
|
||||
if tx == nil {
|
||||
var err error
|
||||
blk, err = unmarshalBlock(ctx, enc)
|
||||
return err
|
||||
})
|
||||
return blk, err
|
||||
tx, err = s.db.Begin(false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.WithError(err).Error("could not rollback read-only getBlock transaction")
|
||||
}
|
||||
}()
|
||||
}
|
||||
return unmarshalBlock(ctx, tx.Bucket(blocksBucket).Get(blockRoot[:]))
|
||||
}
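The reworked getBlock takes an optional bolt transaction: a caller that is already inside a transaction passes it through, while nil makes getBlock open and roll back its own read-only transaction. A sketch of the first case, as a hypothetical method inside this package:

// blockAndParent reads a block and its parent within one View transaction,
// handing the open tx to getBlock so no nested transaction is created.
func (s *Store) blockAndParent(ctx context.Context, root [32]byte) (blk, parent interfaces.ReadOnlySignedBeaconBlock, err error) {
	err = s.db.View(func(tx *bolt.Tx) error {
		var inErr error
		blk, inErr = s.getBlock(ctx, root, tx) // reuse the already-open read transaction
		if inErr != nil {
			return inErr
		}
		parent, inErr = s.getBlock(ctx, blk.Block().ParentRoot(), tx)
		return inErr
	})
	return blk, parent, err
}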
|
||||
|
||||
// OriginCheckpointBlockRoot returns the value written to the db in SaveOriginCheckpointBlockRoot
|
||||
@@ -70,6 +80,21 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
return root, err
|
||||
}
|
||||
|
||||
// HeadBlockRoot returns the latest canonical block root in the Ethereum Beacon Chain.
|
||||
func (s *Store) HeadBlockRoot() ([32]byte, error) {
|
||||
var root [32]byte
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blocksBucket)
|
||||
headRoot := bkt.Get(headBlockRootKey)
|
||||
if len(headRoot) == 0 {
|
||||
return errors.New("no head block root found")
|
||||
}
|
||||
copy(root[:], headRoot)
|
||||
return nil
|
||||
})
|
||||
return root, err
|
||||
}
|
||||
|
||||
// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
|
||||
func (s *Store) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
|
||||
@@ -227,6 +252,21 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
|
||||
return ErrDeleteJustifiedAndFinalized
|
||||
}
|
||||
|
||||
// Look up the block to find its slot; needed to remove the slot index entry.
|
||||
blk, err := s.getBlock(ctx, root, tx)
|
||||
if err != nil {
|
||||
// getBlock can return ErrNotFound, in which case we won't even try to delete it.
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
if err := s.deleteSlotIndexEntry(tx, blk.Block().Slot(), root); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.deleteMatchingParentIndex(tx, blk.Block().ParentRoot(), root); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.deleteBlock(tx, root[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -245,77 +285,82 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
|
||||
// - blockRootValidatorHashesBucket
|
||||
// - blockSlotIndicesBucket
|
||||
// - stateSlotIndicesBucket
|
||||
func (s *Store) DeleteHistoricalDataBeforeSlot(ctx context.Context, cutoffSlot primitives.Slot) error {
|
||||
func (s *Store) DeleteHistoricalDataBeforeSlot(ctx context.Context, cutoffSlot primitives.Slot, batchSize int) (int, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.DeleteHistoricalDataBeforeSlot")
|
||||
defer span.End()
|
||||
|
||||
// Collect slot/root pairs to perform deletions in a separate read only transaction.
|
||||
var (
|
||||
roots [][]byte
|
||||
slts []primitives.Slot
|
||||
)
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
var err error
|
||||
roots, slts, err = blockRootsBySlotRange(ctx, tx.Bucket(blockSlotIndicesBucket), primitives.Slot(0), cutoffSlot, nil, nil, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve block roots")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
slotRoots, err := s.slotRootsInRange(ctx, primitives.Slot(0), cutoffSlot, batchSize)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve block roots and slots")
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Return early if there's nothing to delete.
|
||||
if len(slotRoots) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Perform all deletions in a single transaction for atomicity
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
for _, root := range roots {
|
||||
var numSlotsDeleted int
|
||||
err = s.db.Update(func(tx *bolt.Tx) error {
|
||||
for _, sr := range slotRoots {
|
||||
// Return if context is cancelled or deadline is exceeded.
|
||||
if ctx.Err() != nil {
|
||||
//nolint:nilerr
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete block
|
||||
if err = s.deleteBlock(tx, root); err != nil {
|
||||
if err = s.deleteBlock(tx, sr.root[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete finalized block roots index
|
||||
if err = tx.Bucket(finalizedBlockRootsIndexBucket).Delete(root); err != nil {
|
||||
if err = tx.Bucket(finalizedBlockRootsIndexBucket).Delete(sr.root[:]); err != nil {
|
||||
return errors.Wrap(err, "could not delete finalized block root index")
|
||||
}
|
||||
|
||||
// Delete state
|
||||
if err = tx.Bucket(stateBucket).Delete(root); err != nil {
|
||||
if err = tx.Bucket(stateBucket).Delete(sr.root[:]); err != nil {
|
||||
return errors.Wrap(err, "could not delete state")
|
||||
}
|
||||
|
||||
// Delete state summary
|
||||
if err = tx.Bucket(stateSummaryBucket).Delete(root); err != nil {
|
||||
if err = tx.Bucket(stateSummaryBucket).Delete(sr.root[:]); err != nil {
|
||||
return errors.Wrap(err, "could not delete state summary")
|
||||
}
|
||||
|
||||
// Delete validator entries
|
||||
if err = s.deleteValidatorHashes(tx, root); err != nil {
|
||||
if err = s.deleteValidatorHashes(tx, sr.root[:]); err != nil {
|
||||
return errors.Wrap(err, "could not delete validators")
|
||||
}
|
||||
|
||||
numSlotsDeleted++
|
||||
}
|
||||
|
||||
for _, slot := range slts {
|
||||
for _, sr := range slotRoots {
|
||||
// Delete slot indices
|
||||
if err = tx.Bucket(blockSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(slot)); err != nil {
|
||||
if err = tx.Bucket(blockSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(sr.slot)); err != nil {
|
||||
return errors.Wrap(err, "could not delete block slot index")
|
||||
}
|
||||
if err = tx.Bucket(stateSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(slot)); err != nil {
|
||||
if err = tx.Bucket(stateSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(sr.slot)); err != nil {
|
||||
return errors.Wrap(err, "could not delete state slot index")
|
||||
}
|
||||
}
|
||||
|
||||
// Delete all caches after we have deleted everything from buckets.
|
||||
// This is done after the buckets are deleted to avoid any issues in case of transaction rollback.
|
||||
for _, root := range roots {
|
||||
for _, sr := range slotRoots {
|
||||
// Delete block from cache
|
||||
s.blockCache.Del(string(root))
|
||||
s.blockCache.Del(string(sr.root[:]))
|
||||
// Delete state summary from cache
|
||||
s.stateSummaryCache.delete([32]byte(root))
|
||||
s.stateSummaryCache.delete(sr.root)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return numSlotsDeleted, err
|
||||
}
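Because DeleteHistoricalDataBeforeSlot now removes at most batchSize slot entries per call and returns how many it deleted, a caller that wants to clear everything below the cutoff can loop until the count reaches zero. The loop below is an assumed usage pattern, not code from this change:

// pruneAllBeforeSlot repeatedly deletes batches until no slots below the cutoff remain,
// or until the context is cancelled.
func pruneAllBeforeSlot(ctx context.Context, s *Store, cutoff primitives.Slot, batchSize int) (int, error) {
	total := 0
	for {
		n, err := s.DeleteHistoricalDataBeforeSlot(ctx, cutoff, batchSize)
		if err != nil {
			return total, err
		}
		total += n
		if n == 0 || ctx.Err() != nil {
			return total, ctx.Err()
		}
	}
}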
|
||||
|
||||
// SaveBlock to the db.
|
||||
@@ -336,7 +381,7 @@ func (s *Store) SaveBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
|
||||
// if a `saveBlindedBeaconBlocks` key exists in the database. Otherwise, we check if the last
|
||||
// blocked stored to check if it is blinded, and then write that `saveBlindedBeaconBlocks` key
|
||||
// to the DB for future checks.
|
||||
func (s *Store) shouldSaveBlinded(ctx context.Context) (bool, error) {
|
||||
func (s *Store) shouldSaveBlinded() (bool, error) {
|
||||
var saveBlinded bool
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
metadataBkt := tx.Bucket(chainMetadataBucket)
|
||||
@@ -398,7 +443,7 @@ func prepareBlockBatch(blks []blocks.ROBlock, shouldBlind bool) ([]blockBatchEnt
|
||||
}
|
||||
|
||||
func (s *Store) SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error {
|
||||
shouldBlind, err := s.shouldSaveBlinded(ctx)
|
||||
shouldBlind, err := s.shouldSaveBlinded()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -669,6 +714,49 @@ func (s *Store) SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primi
|
||||
})
|
||||
}
|
||||
|
||||
type slotRoot struct {
|
||||
slot primitives.Slot
|
||||
root [32]byte
|
||||
}
|
||||
|
||||
// slotRootsInRange returns slot and block root pairs of length min(batchSize, end-slot)
|
||||
func (s *Store) slotRootsInRange(ctx context.Context, start, end primitives.Slot, batchSize int) ([]slotRoot, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.slotRootsInRange")
|
||||
defer span.End()
|
||||
if end < start {
|
||||
return nil, errInvalidSlotRange
|
||||
}
|
||||
|
||||
var pairs []slotRoot
|
||||
key := bytesutil.SlotToBytesBigEndian(end)
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
c := bkt.Cursor()
|
||||
for k, v := c.Seek(key); k != nil; k, v = c.Prev() {
|
||||
slot := bytesutil.BytesToSlotBigEndian(k)
|
||||
if slot > end {
|
||||
continue // Seek will seek to the next key *after* the given one if not present
|
||||
}
|
||||
if slot < start {
|
||||
return nil
|
||||
}
|
||||
roots, err := splitRoots(v)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "corrupt value %v in block slot index for slot=%d", v, slot)
|
||||
}
|
||||
for _, r := range roots {
|
||||
pairs = append(pairs, slotRoot{slot: slot, root: r})
|
||||
}
|
||||
if len(pairs) >= batchSize {
|
||||
return nil // allows code to easily cap the number of items pruned
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return pairs, err
|
||||
}
|
||||
|
||||
// blockRootsByFilter retrieves the block roots given the filter criteria.
|
||||
func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter) ([][]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsByFilter")
|
||||
@@ -689,7 +777,7 @@ func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter
|
||||
|
||||
// We retrieve block roots that match a filter criteria of slot ranges, if specified.
|
||||
filtersMap := f.Filters()
|
||||
rootsBySlotRange, _, err := blockRootsBySlotRange(
|
||||
rootsBySlotRange, err := blockRootsBySlotRange(
|
||||
ctx,
|
||||
tx.Bucket(blockSlotIndicesBucket),
|
||||
filtersMap[filters.StartSlot],
|
||||
@@ -734,13 +822,13 @@ func blockRootsBySlotRange(
|
||||
ctx context.Context,
|
||||
bkt *bolt.Bucket,
|
||||
startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded interface{},
|
||||
) ([][]byte, []primitives.Slot, error) {
|
||||
) ([][]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
|
||||
defer span.End()
|
||||
|
||||
// Return nothing when all slot parameters are missing
|
||||
if startSlotEncoded == nil && endSlotEncoded == nil && startEpochEncoded == nil && endEpochEncoded == nil {
|
||||
return [][]byte{}, nil, nil
|
||||
return [][]byte{}, nil
|
||||
}
|
||||
|
||||
var startSlot, endSlot primitives.Slot
|
||||
@@ -761,11 +849,11 @@ func blockRootsBySlotRange(
|
||||
if startEpochOk && endEpochOk {
|
||||
startSlot, err = slots.EpochStart(startEpoch)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
endSlot, err = slots.EpochStart(endEpoch)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
endSlot = endSlot + params.BeaconConfig().SlotsPerEpoch - 1
|
||||
}
|
||||
@@ -776,11 +864,10 @@ func blockRootsBySlotRange(
|
||||
return key != nil && bytes.Compare(key, max) <= 0
|
||||
}
|
||||
if endSlot < startSlot {
|
||||
return nil, nil, errInvalidSlotRange
|
||||
return nil, errInvalidSlotRange
|
||||
}
|
||||
rootsRange := endSlot.SubSlot(startSlot).Div(step)
|
||||
roots := make([][]byte, 0, rootsRange)
|
||||
var slts []primitives.Slot
|
||||
c := bkt.Cursor()
|
||||
for k, v := c.Seek(min); conditional(k, max); k, v = c.Next() {
|
||||
slot := bytesutil.BytesToSlotBigEndian(k)
|
||||
@@ -795,9 +882,8 @@ func blockRootsBySlotRange(
|
||||
splitRoots = append(splitRoots, v[i:i+32])
|
||||
}
|
||||
roots = append(roots, splitRoots...)
|
||||
slts = append(slts, slot)
|
||||
}
|
||||
return roots, slts, nil
|
||||
return roots, nil
|
||||
}
|
||||
|
||||
// blockRootsBySlot retrieves the block roots by slot
|
||||
@@ -853,6 +939,9 @@ func createBlockIndicesFromFilters(ctx context.Context, f *filters.QueryFilter)
|
||||
|
||||
// unmarshal block from marshaled proto beacon block bytes to versioned beacon block struct type.
|
||||
func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
if len(enc) == 0 {
|
||||
return nil, errors.Wrap(ErrNotFound, "empty block bytes in db")
|
||||
}
|
||||
var err error
|
||||
enc, err = snappy.Decode(nil, enc)
|
||||
if err != nil {
|
||||
@@ -1004,6 +1093,47 @@ func (s *Store) deleteBlock(tx *bolt.Tx, root []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) deleteMatchingParentIndex(tx *bolt.Tx, parent, child [32]byte) error {
|
||||
bkt := tx.Bucket(blockParentRootIndicesBucket)
|
||||
if err := deleteRootIndexEntry(bkt, parent[:], child); err != nil {
|
||||
return errors.Wrap(err, "could not delete parent root index entry")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) deleteSlotIndexEntry(tx *bolt.Tx, slot primitives.Slot, root [32]byte) error {
|
||||
key := bytesutil.SlotToBytesBigEndian(slot)
|
||||
bkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
if err := deleteRootIndexEntry(bkt, key, root); err != nil {
|
||||
return errors.Wrap(err, "could not delete slot index entry")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteRootIndexEntry(bkt *bolt.Bucket, key []byte, root [32]byte) error {
|
||||
packed := bkt.Get(key)
|
||||
if len(packed) == 0 {
|
||||
return nil
|
||||
}
|
||||
updated, err := removeRoot(packed, root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Don't update the value if the root was not found.
|
||||
if bytes.Equal(updated, packed) {
|
||||
return nil
|
||||
}
|
||||
// If there are no other roots in the key, just delete it.
|
||||
if len(updated) == 0 {
|
||||
if err := bkt.Delete(key); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Update the key with the root removed.
|
||||
return bkt.Put(key, updated)
|
||||
}
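deleteRootIndexEntry relies on a removeRoot helper that is outside this hunk. For orientation only, a hypothetical version consistent with the index packing 32-byte roots back to back could look like this (an assumption, not the actual implementation):

import (
	"bytes"
	"errors"
)

// removeRootSketch drops one 32-byte root from a packed concatenation of roots,
// returning an unchanged copy when the root is not present.
func removeRootSketch(packed []byte, root [32]byte) ([]byte, error) {
	if len(packed)%32 != 0 {
		return nil, errors.New("packed roots length is not a multiple of 32")
	}
	out := make([]byte, 0, len(packed))
	for i := 0; i < len(packed); i += 32 {
		if bytes.Equal(packed[i:i+32], root[:]) {
			continue // skip the root being removed
		}
		out = append(out, packed[i:i+32]...)
	}
	return out, nil
}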
|
||||
|
||||
func (s *Store) deleteValidatorHashes(tx *bolt.Tx, root []byte) error {
|
||||
ok, err := s.isStateValidatorMigrationOver()
|
||||
if err != nil {
|
||||
|
||||
@@ -196,9 +196,13 @@ func TestStore_BlocksCRUD(t *testing.T) {
|
||||
blockRoot, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.getBlock(ctx, blockRoot, nil)
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
retrievedBlock, err := db.Block(ctx, blockRoot)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, nil, retrievedBlock, "Expected nil block")
|
||||
_, err = db.getBlock(ctx, blockRoot, nil)
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
|
||||
require.NoError(t, db.SaveBlock(ctx, blk))
|
||||
assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db")
|
||||
@@ -214,10 +218,34 @@ func TestStore_BlocksCRUD(t *testing.T) {
|
||||
retrievedPb, err := retrievedBlock.Proto()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(wantedPb, retrievedPb), "Wanted: %v, received: %v", wanted, retrievedBlock)
|
||||
// Check that the block is in the slot->block index
|
||||
found, roots, err := db.BlockRootsBySlot(ctx, blk.Block().Slot())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, found)
|
||||
require.Equal(t, 1, len(roots))
|
||||
require.Equal(t, blockRoot, roots[0])
|
||||
// Delete the block, then check that it is no longer in the index.
|
||||
|
||||
parent := blk.Block().ParentRoot()
|
||||
testCheckParentIndices(t, db.db, parent, true)
|
||||
require.NoError(t, db.DeleteBlock(ctx, blockRoot))
|
||||
require.NoError(t, err)
|
||||
testCheckParentIndices(t, db.db, parent, false)
|
||||
found, roots, err = db.BlockRootsBySlot(ctx, blk.Block().Slot())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, found)
|
||||
require.Equal(t, 0, len(roots))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testCheckParentIndices(t *testing.T, db *bolt.DB, parent [32]byte, expected bool) {
|
||||
require.NoError(t, db.View(func(tx *bolt.Tx) error {
|
||||
require.Equal(t, expected, tx.Bucket(blockParentRootIndicesBucket).Get(parent[:]) != nil)
|
||||
return nil
|
||||
}))
|
||||
}
|
||||
|
||||
func TestStore_BlocksHandleZeroCase(t *testing.T) {
|
||||
for _, tt := range blockTests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
@@ -359,184 +387,221 @@ func TestStore_DeleteFinalizedBlock(t *testing.T) {

func TestStore_HistoricalDataBeforeSlot(t *testing.T) {
	slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
	db := setupDB(t)
	ctx := context.Background()

	// Save genesis block root
	require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
	tests := []struct {
		name             string
		batchSize        int
		numOfEpochs      uint64
		deleteBeforeSlot uint64
	}{
		{
			name:             "batchSize less than delete range",
			batchSize:        10,
			numOfEpochs:      4,
			deleteBeforeSlot: 25,
		},
		{
			name:             "batchSize greater than delete range",
			batchSize:        30,
			numOfEpochs:      4,
			deleteBeforeSlot: 15,
		},
	}
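
For reference, the expectations asserted further down in this test work backwards from the cutoff: when batchSize >= deleteBeforeSlot the whole range of slots 1 through deleteBeforeSlot is removed, otherwise only the most recent batchSize slots below the cutoff are. The first case (batchSize 10, deleteBeforeSlot 25) therefore expects slots 25 - 10 + 1 = 16 through 25 to be deleted, while the second case (batchSize 30, deleteBeforeSlot 15) expects slots 1 through 15, since the batch is larger than the whole range.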

	// Create and save blocks for 4 epochs
	blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot)
	require.NoError(t, db.SaveBlocks(ctx, blks))
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
// Save genesis block root
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
|
||||
|
||||
// Mark state validator migration as complete
|
||||
err := db.db.Update(func(tx *bolt.Tx) error {
|
||||
return tx.Bucket(migrationsBucket).Put(migrationStateValidatorsKey, migrationCompleted)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// Create and save blocks for given epochs
|
||||
blks := makeBlocks(t, 0, slotsPerEpoch*tt.numOfEpochs, genesisBlockRoot)
|
||||
require.NoError(t, db.SaveBlocks(ctx, blks))
|
||||
|
||||
migrated, err := db.isStateValidatorMigrationOver()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, migrated)
|
||||
// Mark state validator migration as complete
|
||||
err := db.db.Update(func(tx *bolt.Tx) error {
|
||||
return tx.Bucket(migrationsBucket).Put(migrationStateValidatorsKey, migrationCompleted)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create state summaries and states for each block
|
||||
ss := make([]*ethpb.StateSummary, len(blks))
|
||||
states := make([]state.BeaconState, len(blks))
|
||||
migrated, err := db.isStateValidatorMigrationOver()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, migrated)
|
||||
|
||||
for i, blk := range blks {
|
||||
slot := blk.Block().Slot()
|
||||
r, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
// Create state summaries and states for each block
|
||||
ss := make([]*ethpb.StateSummary, len(blks))
|
||||
states := make([]state.BeaconState, len(blks))
|
||||
|
||||
// Create and save state summary
|
||||
ss[i] = &ethpb.StateSummary{
|
||||
Slot: slot,
|
||||
Root: r[:],
|
||||
}
|
||||
for i, blk := range blks {
|
||||
slot := blk.Block().Slot()
|
||||
r, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create and save state with validator entries
|
||||
vals := make([]*ethpb.Validator, 2)
|
||||
for j := range vals {
|
||||
vals[j] = &ethpb.Validator{
|
||||
PublicKey: bytesutil.PadTo([]byte{byte(i*j + 1)}, 48),
|
||||
WithdrawalCredentials: bytesutil.PadTo([]byte{byte(i*j + 2)}, 32),
|
||||
// Create and save state summary
|
||||
ss[i] = &ethpb.StateSummary{
|
||||
Slot: slot,
|
||||
Root: r[:],
|
||||
}
|
||||
|
||||
// Create and save state with validator entries
|
||||
vals := make([]*ethpb.Validator, 2)
|
||||
for j := range vals {
|
||||
vals[j] = &ethpb.Validator{
|
||||
PublicKey: bytesutil.PadTo([]byte{byte(i*j + 1)}, 48),
|
||||
WithdrawalCredentials: bytesutil.PadTo([]byte{byte(i*j + 2)}, 32),
|
||||
}
|
||||
}
|
||||
|
||||
st, err := util.NewBeaconState(func(state *ethpb.BeaconState) error {
|
||||
state.Validators = vals
|
||||
state.Slot = slot
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, st, r))
|
||||
states[i] = st
|
||||
|
||||
// Verify validator entries are saved to db
|
||||
valsActual, err := db.validatorEntries(ctx, r)
|
||||
require.NoError(t, err)
|
||||
for j, val := range valsActual {
|
||||
require.DeepEqual(t, vals[j], val)
|
||||
}
|
||||
}
|
||||
}
|
||||
require.NoError(t, db.SaveStateSummaries(ctx, ss))
|
||||
|
||||
st, err := util.NewBeaconState(func(state *ethpb.BeaconState) error {
|
||||
state.Validators = vals
|
||||
state.Slot = slot
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, st, r))
|
||||
states[i] = st
|
||||
|
||||
// Verify validator entries are saved to db
|
||||
valsActual, err := db.validatorEntries(ctx, r)
|
||||
require.NoError(t, err)
|
||||
for j, val := range valsActual {
|
||||
require.DeepEqual(t, vals[j], val)
|
||||
}
|
||||
}
|
||||
require.NoError(t, db.SaveStateSummaries(ctx, ss))
|
||||
|
||||
// Verify slot indices exist before deletion
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
|
||||
|
||||
for i := uint64(0); i < slotsPerEpoch; i++ {
|
||||
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
|
||||
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
|
||||
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist", i)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Delete data before slot at epoch 1
|
||||
require.NoError(t, db.DeleteHistoricalDataBeforeSlot(ctx, primitives.Slot(slotsPerEpoch)))
|
||||
|
||||
// Verify blocks from epoch 0 are deleted
|
||||
for i := uint64(0); i < slotsPerEpoch; i++ {
|
||||
root, err := blks[i].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check block is deleted
|
||||
retrievedBlocks, err := db.BlocksBySlot(ctx, primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(retrievedBlocks))
|
||||
|
||||
// Verify block does not exist
|
||||
assert.Equal(t, false, db.HasBlock(ctx, root))
|
||||
|
||||
// Verify block parent root does not exist
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
require.Equal(t, 0, len(tx.Bucket(blockParentRootIndicesBucket).Get(root[:])))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify state is deleted
|
||||
hasState := db.HasState(ctx, root)
|
||||
assert.Equal(t, false, hasState)
|
||||
|
||||
// Verify state summary is deleted
|
||||
hasSummary := db.HasStateSummary(ctx, root)
|
||||
assert.Equal(t, false, hasSummary)
|
||||
|
||||
// Verify validator hashes for block roots are deleted
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
assert.Equal(t, 0, len(tx.Bucket(blockRootValidatorHashesBucket).Get(root[:])))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Verify slot indices are deleted
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
|
||||
|
||||
for i := uint64(0); i < slotsPerEpoch; i++ {
|
||||
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
|
||||
assert.Equal(t, 0, len(blockSlotBkt.Get(slot)), fmt.Sprintf("Expected block slot index to be deleted, slot: %d", slot))
|
||||
assert.Equal(t, 0, len(stateSlotBkt.Get(slot)), fmt.Sprintf("Expected state slot index to be deleted, slot: %d", slot))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify blocks from epochs 1-3 still exist
|
||||
for i := slotsPerEpoch; i < slotsPerEpoch*4; i++ {
|
||||
root, err := blks[i].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify block exists
|
||||
assert.Equal(t, true, db.HasBlock(ctx, root))
|
||||
|
||||
// Verify remaining block parent root exists, except last slot since we store parent roots of each block.
|
||||
if i < slotsPerEpoch*4-1 {
|
||||
// Verify slot indices exist before deletion
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
require.NotNil(t, tx.Bucket(blockParentRootIndicesBucket).Get(root[:]), fmt.Sprintf("Expected block parent index to be deleted, slot: %d", i))
|
||||
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
|
||||
|
||||
for i := uint64(0); i < uint64(tt.deleteBeforeSlot); i++ {
|
||||
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
|
||||
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
|
||||
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist", i)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Verify state exists
|
||||
hasState := db.HasState(ctx, root)
|
||||
assert.Equal(t, true, hasState)
|
||||
// Delete data before slot
|
||||
slotsDeleted, err := db.DeleteHistoricalDataBeforeSlot(ctx, primitives.Slot(tt.deleteBeforeSlot), tt.batchSize)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify state summary exists
|
||||
hasSummary := db.HasStateSummary(ctx, root)
|
||||
assert.Equal(t, true, hasSummary)
|
||||
var startSlotDeleted, endSlotDeleted uint64
|
||||
if tt.batchSize >= int(tt.deleteBeforeSlot) {
|
||||
startSlotDeleted = 1
|
||||
endSlotDeleted = tt.deleteBeforeSlot
|
||||
} else {
|
||||
startSlotDeleted = tt.deleteBeforeSlot - uint64(tt.batchSize) + 1
|
||||
endSlotDeleted = tt.deleteBeforeSlot
|
||||
}
|
||||
|
||||
// Verify slot indices still exist
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
|
||||
require.Equal(t, endSlotDeleted-startSlotDeleted+1, uint64(slotsDeleted))
|
||||
|
||||
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
|
||||
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
|
||||
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist")
|
||||
return nil
|
||||
// Verify blocks before given slot/batch are deleted
|
||||
for i := startSlotDeleted; i < endSlotDeleted; i++ {
|
||||
root, err := blks[i].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check block is deleted
|
||||
retrievedBlocks, err := db.BlocksBySlot(ctx, primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(retrievedBlocks), fmt.Sprintf("Expected %d blocks, got %d for slot %d", 0, len(retrievedBlocks), i))
|
||||
|
||||
// Verify block does not exist
|
||||
assert.Equal(t, false, db.HasBlock(ctx, root), fmt.Sprintf("Expected block index to not exist for slot %d", i))
|
||||
|
||||
// Verify block parent root does not exist
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
require.Equal(t, 0, len(tx.Bucket(blockParentRootIndicesBucket).Get(root[:])))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify state is deleted
|
||||
hasState := db.HasState(ctx, root)
|
||||
assert.Equal(t, false, hasState)
|
||||
|
||||
// Verify state summary is deleted
|
||||
hasSummary := db.HasStateSummary(ctx, root)
|
||||
assert.Equal(t, false, hasSummary)
|
||||
|
||||
// Verify validator hashes for block roots are deleted
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
assert.Equal(t, 0, len(tx.Bucket(blockRootValidatorHashesBucket).Get(root[:])))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Verify slot indices are deleted
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
|
||||
|
||||
for i := startSlotDeleted; i < endSlotDeleted; i++ {
|
||||
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
|
||||
assert.Equal(t, 0, len(blockSlotBkt.Get(slot)), fmt.Sprintf("Expected block slot index to be deleted, slot: %d", slot))
|
||||
assert.Equal(t, 0, len(stateSlotBkt.Get(slot)), fmt.Sprintf("Expected state slot index to be deleted, slot: %d", slot))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify blocks from expectedLastDeletedSlot till numEpochs still exist
|
||||
for i := endSlotDeleted; i < slotsPerEpoch*tt.numOfEpochs; i++ {
|
||||
root, err := blks[i].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify block exists
|
||||
assert.Equal(t, true, db.HasBlock(ctx, root))
|
||||
|
||||
// Verify remaining block parent root exists, except last slot since we store parent roots of each block.
|
||||
if i < slotsPerEpoch*tt.numOfEpochs-1 {
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
require.NotNil(t, tx.Bucket(blockParentRootIndicesBucket).Get(root[:]), fmt.Sprintf("Expected block parent index to be deleted, slot: %d", i))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Verify state exists
|
||||
hasState := db.HasState(ctx, root)
|
||||
assert.Equal(t, true, hasState)
|
||||
|
||||
// Verify state summary exists
|
||||
hasSummary := db.HasStateSummary(ctx, root)
|
||||
assert.Equal(t, true, hasSummary)
|
||||
|
||||
// Verify slot indices still exist
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
|
||||
|
||||
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
|
||||
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
|
||||
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist")
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify validator entries still exist
|
||||
valsActual, err := db.validatorEntries(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, valsActual)
|
||||
|
||||
// Verify remaining validator hashes for block roots exists
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
assert.NotNil(t, tx.Bucket(blockRootValidatorHashesBucket).Get(root[:]))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify validator entries still exist
|
||||
valsActual, err := db.validatorEntries(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, valsActual)
|
||||
|
||||
// Verify remaining validator hashes for block roots exists
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
assert.NotNil(t, tx.Bucket(blockRootValidatorHashesBucket).Get(root[:]))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestStore_GenesisBlock(t *testing.T) {
|
||||
|
||||
@@ -820,30 +820,25 @@ func (s *Store) slotByBlockRoot(ctx context.Context, tx *bolt.Tx, blockRoot []by
|
||||
// no need to construct the validator entries as it is not used here.
|
||||
s, err := s.unmarshalState(ctx, enc, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, errors.Wrap(err, "could not unmarshal state")
|
||||
}
|
||||
if s == nil || s.IsNil() {
|
||||
return 0, errors.New("state can't be nil")
|
||||
}
|
||||
return s.Slot(), nil
|
||||
}
|
||||
b := &ethpb.SignedBeaconBlock{}
|
||||
err := decode(ctx, enc, b)
|
||||
b, err := unmarshalBlock(ctx, enc)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not unmarshal block")
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
wsb, err := blocks.NewSignedBeaconBlock(b)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(wsb); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return b.Block.Slot, nil
|
||||
return b.Block().Slot(), nil
|
||||
}
|
||||
stateSummary := &ethpb.StateSummary{}
|
||||
if err := decode(ctx, enc, stateSummary); err != nil {
|
||||
return 0, err
|
||||
return 0, errors.Wrap(err, "could not unmarshal state summary")
|
||||
}
|
||||
return stateSummary.Slot, nil
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
mathRand "math/rand"
|
||||
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -1070,6 +1069,31 @@ func TestBellatrixState_CanDelete(t *testing.T) {
|
||||
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
|
||||
}
|
||||
|
||||
func TestBellatrixState_CanDeleteWithBlock(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
b.Block.Slot = 100
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(context.Background(), wsb))
|
||||
|
||||
require.Equal(t, false, db.HasState(context.Background(), r))
|
||||
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, st.SetSlot(100))
|
||||
|
||||
require.NoError(t, db.SaveState(context.Background(), st, r))
|
||||
require.Equal(t, true, db.HasState(context.Background(), r))
|
||||
|
||||
require.NoError(t, db.DeleteState(context.Background(), r))
|
||||
savedS, err := db.State(context.Background(), r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
|
||||
}
|
||||
|
||||
func TestDenebState_CanSaveRetrieve(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
|
||||
@@ -114,3 +114,27 @@ func splitRoots(b []byte) ([][32]byte, error) {
	}
	return rl, nil
}

func removeRoot(roots []byte, root [32]byte) ([]byte, error) {
	if len(roots) == 0 {
		return []byte{}, nil
	}
	if len(roots) == 32 && bytes.Equal(roots, root[:]) {
		return []byte{}, nil
	}
	if len(roots)%32 != 0 {
		return nil, errors.Wrapf(errMisalignedRootList, "root list len=%d", len(roots))
	}

	search := root[:]
	for i := 0; i <= len(roots)-32; i += 32 {
		if bytes.Equal(roots[i:i+32], search) {
			result := make([]byte, len(roots)-32)
			copy(result, roots[:i])
			copy(result[i:], roots[i+32:])
			return result, nil
		}
	}

	return roots, nil
}
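
removeRoot is deliberately copy-on-write: on a match it returns a freshly allocated slice with that 32-byte entry spliced out and never mutates its input, and returning the input unchanged is the caller's signal that nothing matched (deleteRootIndexEntry skips the Put in that case). A standalone sketch of the splice using only the standard library:

package main

import (
	"bytes"
	"fmt"
)

// spliceOut mirrors the core of removeRoot above: return a new slice with the
// 32-byte entry starting at offset i removed, leaving the input untouched.
func spliceOut(packed []byte, i int) []byte {
	out := make([]byte, len(packed)-32)
	copy(out, packed[:i])
	copy(out[i:], packed[i+32:])
	return out
}

func main() {
	a := bytes.Repeat([]byte{0xaa}, 32)
	b := bytes.Repeat([]byte{0xbb}, 32)
	packed := append(append([]byte{}, a...), b...) // a||b, 64 bytes

	out := spliceOut(packed, 0) // drop a
	fmt.Println(len(packed), len(out))       // 64 32
	fmt.Println(bytes.Equal(out, b))         // true: only b remains
	fmt.Println(bytes.Equal(packed[:32], a)) // true: the input was not modified
}
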
@@ -1,6 +1,7 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"testing"
|
||||
@@ -195,3 +196,85 @@ func TestSplitRoots(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func tPad(p ...[]byte) []byte {
|
||||
r := make([]byte, 32*len(p))
|
||||
for i, b := range p {
|
||||
copy(r[i*32:], b)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func TestRemoveRoot(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
roots []byte
|
||||
root [32]byte
|
||||
expect []byte
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
roots: []byte{},
|
||||
root: [32]byte{0xde, 0xad, 0xbe, 0xef},
|
||||
expect: []byte{},
|
||||
},
|
||||
{
|
||||
name: "single",
|
||||
roots: tPad([]byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
root: [32]byte{0xde, 0xad, 0xbe, 0xef},
|
||||
expect: []byte{},
|
||||
},
|
||||
{
|
||||
name: "single, different",
|
||||
roots: tPad([]byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
root: [32]byte{0xde, 0xad, 0xbe, 0xee},
|
||||
expect: tPad([]byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
},
|
||||
{
|
||||
name: "multi",
|
||||
roots: tPad([]byte{0xde, 0xad, 0xbe, 0xef}, []byte{0xac, 0x1d, 0xfa, 0xce}),
|
||||
root: [32]byte{0xac, 0x1d, 0xfa, 0xce},
|
||||
expect: tPad([]byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
},
|
||||
{
|
||||
name: "multi, reordered",
|
||||
roots: tPad([]byte{0xac, 0x1d, 0xfa, 0xce}, []byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
root: [32]byte{0xac, 0x1d, 0xfa, 0xce},
|
||||
expect: tPad([]byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
},
|
||||
{
|
||||
name: "multi, 3",
|
||||
roots: tPad([]byte{0xac, 0x1d, 0xfa, 0xce}, []byte{0xbe, 0xef, 0xca, 0xb5}, []byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
root: [32]byte{0xac, 0x1d, 0xfa, 0xce},
|
||||
expect: tPad([]byte{0xbe, 0xef, 0xca, 0xb5}, []byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
},
|
||||
{
|
||||
name: "multi, different",
|
||||
roots: tPad([]byte{0xde, 0xad, 0xbe, 0xef}, []byte{0xac, 0x1d, 0xfa, 0xce}),
|
||||
root: [32]byte{0xac, 0x1d, 0xbe, 0xa7},
|
||||
expect: tPad([]byte{0xde, 0xad, 0xbe, 0xef}, []byte{0xac, 0x1d, 0xfa, 0xce}),
|
||||
},
|
||||
{
|
||||
name: "misaligned",
|
||||
roots: make([]byte, 61),
|
||||
root: [32]byte{0xac, 0x1d, 0xbe, 0xa7},
|
||||
err: errMisalignedRootList,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
before := make([]byte, len(c.roots))
|
||||
copy(before, c.roots)
|
||||
r, err := removeRoot(c.roots, c.root)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(c.expect), len(r))
|
||||
require.Equal(t, true, bytes.Equal(c.expect, r))
|
||||
require.Equal(t, true, bytes.Equal(before, c.roots))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
@@ -19,7 +21,17 @@ func (s *Store) LastValidatedCheckpoint(ctx context.Context) (*ethpb.Checkpoint,
|
||||
if enc == nil {
|
||||
var finErr error
|
||||
checkpoint, finErr = s.FinalizedCheckpoint(ctx)
|
||||
return finErr
|
||||
if finErr != nil {
|
||||
return finErr
|
||||
}
|
||||
if bytes.Equal(checkpoint.Root, params.BeaconConfig().ZeroHash[:]) {
|
||||
bkt = tx.Bucket(blocksBucket)
|
||||
r := bkt.Get(genesisBlockRootKey)
|
||||
if r != nil {
|
||||
checkpoint.Root = r
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
checkpoint = &ethpb.Checkpoint{}
|
||||
return decode(ctx, enc, checkpoint)
|
||||
|
||||
@@ -16,6 +16,15 @@ import (

var log = logrus.WithField("prefix", "db-pruner")

const (
	// defaultPrunableBatchSize is the number of slots that can be pruned at once.
	defaultPrunableBatchSize = 32
	// defaultPruningWindow is the duration of one pruning window.
	defaultPruningWindow = time.Second * 3
	// defaultNumBatchesToPrune is the number of batches to prune in one pruning window.
	defaultNumBatchesToPrune = 15
)

type ServiceOption func(*Service)

// WithRetentionPeriod allows the user to specify a different data retention period than the spec default.

@@ -143,14 +152,17 @@ func (p *Service) prune(slot primitives.Slot) error {
	}).Debug("Pruning chain data")

	tt := time.Now()
	if err := p.db.DeleteHistoricalDataBeforeSlot(p.ctx, pruneUpto); err != nil {
		return errors.Wrapf(err, "could not delete upto slot %d", pruneUpto)
	numBatches, err := p.pruneBatches(pruneUpto)
	if err != nil {
		return errors.Wrap(err, "failed to prune batches")
	}

	log.WithFields(logrus.Fields{
		"prunedUpto":  pruneUpto,
		"duration":    time.Since(tt),
		"currentSlot": slot,
		"batchSize":   defaultPrunableBatchSize,
		"numBatches":  numBatches,
	}).Debug("Successfully pruned chain data")

	// Update pruning checkpoint.

@@ -159,6 +171,33 @@ func (p *Service) prune(slot primitives.Slot) error {
	return nil
}

func (p *Service) pruneBatches(pruneUpto primitives.Slot) (int, error) {
	ctx, cancel := context.WithTimeout(p.ctx, defaultPruningWindow)
	defer cancel()

	numBatches := 0
	for {
		select {
		case <-ctx.Done():
			return numBatches, nil
		default:
			for i := 0; i < defaultNumBatchesToPrune; i++ {
				slotsDeleted, err := p.db.DeleteHistoricalDataBeforeSlot(ctx, pruneUpto, defaultPrunableBatchSize)
				if err != nil {
					return 0, errors.Wrapf(err, "could not delete upto slot %d", pruneUpto)
				}

				// Return if there's nothing to delete.
				if slotsDeleted == 0 {
					return numBatches, nil
				}

				numBatches++
			}
		}
	}
}
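
As written, a single pass of the inner loop above deletes at most defaultNumBatchesToPrune × defaultPrunableBatchSize = 15 × 32 = 480 slots, and the outer loop keeps issuing passes until either a batch reports zero deleted slots or the 3-second defaultPruningWindow context expires. The select only observes cancellation between passes; within a pass, an expired context surfaces, if at all, through the context handed to DeleteHistoricalDataBeforeSlot.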

// pruneStartSlotFunc returns the function to determine the start slot to start pruning.
func pruneStartSlotFunc(retentionEpochs primitives.Epoch) func(primitives.Slot) primitives.Slot {
	return func(current primitives.Slot) primitives.Slot {

@@ -156,6 +156,10 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*
|
||||
if n.parent != nil {
|
||||
parentRoot = n.parent.root
|
||||
}
|
||||
target := [32]byte{}
|
||||
if n.target != nil {
|
||||
target = n.target.root
|
||||
}
|
||||
thisNode := &forkchoice2.Node{
|
||||
Slot: n.slot,
|
||||
BlockRoot: n.root[:],
|
||||
@@ -169,6 +173,7 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*
|
||||
ExecutionOptimistic: n.optimistic,
|
||||
ExecutionBlockHash: n.payloadHash[:],
|
||||
Timestamp: n.timestamp,
|
||||
Target: target[:],
|
||||
}
|
||||
if n.optimistic {
|
||||
thisNode.Validity = forkchoice2.Optimistic
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
)
|
||||
@@ -15,7 +13,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
|
||||
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
|
||||
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
|
||||
if !ok || previousNode == nil {
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid prev root %#x", s.previousProposerBoostRoot))
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid prev root %#x", s.previousProposerBoostRoot)
|
||||
} else {
|
||||
previousNode.balance -= s.previousProposerBoostScore
|
||||
}
|
||||
@@ -24,7 +22,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
|
||||
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
|
||||
currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
|
||||
if !ok || currentNode == nil {
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid current root %#x", s.proposerBoostRoot))
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid current root %#x", s.proposerBoostRoot)
|
||||
} else {
|
||||
proposerScore = (s.committeeWeight * params.BeaconConfig().ProposerScoreBoost) / 100
|
||||
currentNode.balance += proposerScore
|
||||
|
||||
@@ -252,6 +252,13 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
|
||||
return roots, slots
|
||||
}
|
||||
|
||||
func (f *ForkChoice) HighestReceivedBlockRoot() [32]byte {
|
||||
if f.store.highestReceivedNode == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
return f.store.highestReceivedNode.root
|
||||
}
|
||||
|
||||
// HighestReceivedBlockSlot returns the highest slot received by the forkchoice
|
||||
func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
|
||||
if f.store.highestReceivedNode == nil {
|
||||
|
||||
@@ -65,6 +65,7 @@ type FastGetter interface {
|
||||
FinalizedPayloadBlockHash() [32]byte
|
||||
HasNode([32]byte) bool
|
||||
HighestReceivedBlockSlot() primitives.Slot
|
||||
HighestReceivedBlockRoot() [32]byte
|
||||
HighestReceivedBlockDelay() primitives.Slot
|
||||
IsCanonical(root [32]byte) bool
|
||||
IsOptimistic(root [32]byte) (bool, error)
|
||||
|
||||
@@ -114,6 +114,13 @@ func (ro *ROForkChoice) HighestReceivedBlockSlot() primitives.Slot {
|
||||
return ro.getter.HighestReceivedBlockSlot()
|
||||
}
|
||||
|
||||
// HighestReceivedBlockRoot delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) HighestReceivedBlockRoot() [32]byte {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.HighestReceivedBlockRoot()
|
||||
}
|
||||
|
||||
// HighestReceivedBlockDelay delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) HighestReceivedBlockDelay() primitives.Slot {
|
||||
ro.l.RLock()
|
||||
|
||||
@@ -29,6 +29,7 @@ const (
|
||||
unrealizedJustifiedPayloadBlockHashCalled
|
||||
nodeCountCalled
|
||||
highestReceivedBlockSlotCalled
|
||||
highestReceivedBlockRootCalled
|
||||
highestReceivedBlockDelayCalled
|
||||
receivedBlocksLastEpochCalled
|
||||
weightCalled
|
||||
@@ -252,6 +253,11 @@ func (ro *mockROForkchoice) HighestReceivedBlockSlot() primitives.Slot {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) HighestReceivedBlockRoot() [32]byte {
|
||||
ro.calls = append(ro.calls, highestReceivedBlockRootCalled)
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) HighestReceivedBlockDelay() primitives.Slot {
|
||||
ro.calls = append(ro.calls, highestReceivedBlockDelayCalled)
|
||||
return 0
|
||||
|
||||
@@ -166,7 +166,7 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
|
||||
}
|
||||
|
||||
if !cliCtx.IsSet(flags.SuggestedFeeRecipient.Name) {
|
||||
log.Warnf("In order to receive transaction fees from proposing blocks, " +
|
||||
log.Warn("In order to receive transaction fees from proposing blocks, " +
|
||||
"you must provide flag --" + flags.SuggestedFeeRecipient.Name + " with a valid ethereum address when starting your beacon node. " +
|
||||
"Please see our documentation for more information on this requirement (https://docs.prylabs.network/docs/execution-node/fee-recipient).")
|
||||
return nil
|
||||
|
||||
@@ -122,6 +122,7 @@ type BeaconNode struct {
|
||||
BlobStorageOptions []filesystem.BlobStorageOption
|
||||
verifyInitWaiter *verification.InitializerWaiter
|
||||
syncChecker *initialsync.SyncChecker
|
||||
slasherEnabled bool
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -159,6 +160,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
serviceFlagOpts: &serviceFlagOpts{},
|
||||
initialSyncComplete: make(chan struct{}),
|
||||
syncChecker: &initialsync.SyncChecker{},
|
||||
slasherEnabled: cliCtx.Bool(flags.SlasherFlag.Name),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
@@ -342,7 +344,7 @@ func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *sta
|
||||
return errors.Wrap(err, "could not register slashing pool service")
|
||||
}
|
||||
|
||||
log.Debugln("Registering Slasher Service")
|
||||
log.WithField("enabled", beacon.slasherEnabled).Debugln("Registering Slasher Service")
|
||||
if err := beacon.registerSlasherService(); err != nil {
|
||||
return errors.Wrap(err, "could not register slasher service")
|
||||
}
|
||||
@@ -587,7 +589,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
|
||||
}
|
||||
|
||||
func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
|
||||
if !features.Get().EnableSlasher {
|
||||
if !b.slasherEnabled {
|
||||
return nil
|
||||
}
|
||||
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
|
||||
@@ -775,6 +777,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
blockchain.WithPayloadIDCache(b.payloadIDCache),
|
||||
blockchain.WithSyncChecker(b.syncChecker),
|
||||
blockchain.WithSlasherEnabled(b.slasherEnabled),
|
||||
)
|
||||
|
||||
blockchainService, err := blockchain.NewService(b.ctx, opts...)
|
||||
@@ -859,6 +862,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithBlobStorage(b.BlobStorage),
|
||||
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
|
||||
regularsync.WithAvailableBlocker(bFillStore),
|
||||
regularsync.WithSlasherEnabled(b.slasherEnabled),
|
||||
)
|
||||
return b.services.RegisterService(rs)
|
||||
}
|
||||
@@ -887,7 +891,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerSlasherService() error {
|
||||
if !features.Get().EnableSlasher {
|
||||
if !b.slasherEnabled {
|
||||
return nil
|
||||
}
|
||||
var chainService *blockchain.Service
|
||||
@@ -934,7 +938,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
}
|
||||
|
||||
var slasherService *slasher.Service
|
||||
if features.Get().EnableSlasher {
|
||||
if b.slasherEnabled {
|
||||
if err := b.services.FetchService(&slasherService); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SaveUnaggregatedAttestation saves an unaggregated attestation in cache.
|
||||
@@ -60,7 +61,8 @@ func (c *AttCaches) UnaggregatedAttestations() ([]ethpb.Att, error) {
|
||||
for _, att := range unAggregatedAtts {
|
||||
seen, err := c.hasSeenBit(att)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
log.WithError(err).Debug("Could not check if unaggregated attestation's bit has been seen. Attestation will not be returned")
|
||||
continue
|
||||
}
|
||||
if !seen {
|
||||
atts = append(atts, att.Clone())
|
||||
@@ -137,7 +139,7 @@ func (c *AttCaches) DeleteUnaggregatedAttestation(att ethpb.Att) error {
|
||||
}
|
||||
|
||||
if err := c.insertSeenBit(att); err != nil {
|
||||
return err
|
||||
log.WithError(err).Debug("Could not insert seen bit of unaggregated attestation. Attestation will be deleted")
|
||||
}
|
||||
|
||||
id, err := attestation.NewId(att, attestation.Full)
|
||||
@@ -163,7 +165,12 @@ func (c *AttCaches) DeleteSeenUnaggregatedAttestations() (int, error) {
|
||||
if att == nil || att.IsNil() || att.IsAggregated() {
|
||||
continue
|
||||
}
|
||||
if seen, err := c.hasSeenBit(att); err == nil && seen {
|
||||
seen, err := c.hasSeenBit(att)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not check if unaggregated attestation's bit has been seen. Attestation will be deleted")
|
||||
seen = true
|
||||
}
|
||||
if seen {
|
||||
delete(c.unAggregatedAtt, r)
|
||||
count++
|
||||
}
|
||||
|
||||
@@ -17,6 +17,24 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestKV_Unaggregated_UnaggregatedAttestations(t *testing.T) {
|
||||
t.Run("not returned when hasSeenBit fails", func(t *testing.T) {
|
||||
att := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b101}})
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
require.NoError(t, err)
|
||||
|
||||
cache := NewAttCaches()
|
||||
require.NoError(t, cache.SaveUnaggregatedAttestation(att))
|
||||
cache.seenAtt.Delete(id.String())
|
||||
// cache a bitlist whose length is different from the attestation bitlist's length
|
||||
cache.seenAtt.Set(id.String(), []bitfield.Bitlist{{0b1001}}, c.DefaultExpiration)
|
||||
|
||||
atts, err := cache.UnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(atts))
|
||||
})
|
||||
}
|
||||
|
||||
func TestKV_Unaggregated_SaveUnaggregatedAttestation(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -155,6 +173,21 @@ func TestKV_Unaggregated_DeleteUnaggregatedAttestation(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, []ethpb.Att{}, returned)
|
||||
})
|
||||
|
||||
t.Run("deleted when insertSeenBit fails", func(t *testing.T) {
|
||||
att := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b101}})
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
require.NoError(t, err)
|
||||
|
||||
cache := NewAttCaches()
|
||||
require.NoError(t, cache.SaveUnaggregatedAttestation(att))
|
||||
cache.seenAtt.Delete(id.String())
|
||||
// cache a bitlist whose length is different from the attestation bitlist's length
|
||||
cache.seenAtt.Set(id.String(), []bitfield.Bitlist{{0b1001}}, c.DefaultExpiration)
|
||||
|
||||
require.NoError(t, cache.DeleteUnaggregatedAttestation(att))
|
||||
assert.Equal(t, 0, len(cache.unAggregatedAtt), "Attestation was not deleted")
|
||||
})
|
||||
}
|
||||
|
||||
func TestKV_Unaggregated_DeleteSeenUnaggregatedAttestations(t *testing.T) {
|
||||
@@ -232,6 +265,23 @@ func TestKV_Unaggregated_DeleteSeenUnaggregatedAttestations(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, []ethpb.Att{}, returned)
|
||||
})
|
||||
|
||||
t.Run("deleted when hasSeenBit fails", func(t *testing.T) {
|
||||
att := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b101}})
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
require.NoError(t, err)
|
||||
|
||||
cache := NewAttCaches()
|
||||
require.NoError(t, cache.SaveUnaggregatedAttestation(att))
|
||||
cache.seenAtt.Delete(id.String())
|
||||
// cache a bitlist whose length is different from the attestation bitlist's length
|
||||
cache.seenAtt.Set(id.String(), []bitfield.Bitlist{{0b1001}}, c.DefaultExpiration)
|
||||
|
||||
count, err := cache.DeleteSeenUnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, count)
|
||||
assert.Equal(t, 0, len(cache.unAggregatedAtt), "Attestation was not deleted")
|
||||
})
|
||||
}
|
||||
|
||||
func TestKV_Unaggregated_UnaggregatedAttestationsBySlotIndex(t *testing.T) {
|
||||
|
||||
@@ -33,6 +33,9 @@ var (
	// AggregateAttestationMap maps the fork-version to the underlying data type for that
	// particular fork period.
	AggregateAttestationMap map[[4]byte]func() (ethpb.SignedAggregateAttAndProof, error)
	// AttesterSlashingMap maps the fork-version to the underlying data type for that particular
	// fork period.
	AttesterSlashingMap map[[4]byte]func() (ethpb.AttSlashing, error)
)

// InitializeDataMaps initializes all the relevant object maps. This function is called to

@@ -151,4 +154,29 @@ func InitializeDataMaps() {
			return &ethpb.SignedAggregateAttestationAndProofElectra{}, nil
		},
	}

	// Reset our attester slashing map.
	AttesterSlashingMap = map[[4]byte]func() (ethpb.AttSlashing, error){
		bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (ethpb.AttSlashing, error) {
			return &ethpb.AttesterSlashing{}, nil
		},
		bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (ethpb.AttSlashing, error) {
			return &ethpb.AttesterSlashing{}, nil
		},
		bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (ethpb.AttSlashing, error) {
			return &ethpb.AttesterSlashing{}, nil
		},
		bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (ethpb.AttSlashing, error) {
			return &ethpb.AttesterSlashing{}, nil
		},
		bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (ethpb.AttSlashing, error) {
			return &ethpb.AttesterSlashing{}, nil
		},
		bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.AttSlashing, error) {
			return &ethpb.AttesterSlashingElectra{}, nil
		},
		bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (ethpb.AttSlashing, error) {
			return &ethpb.AttesterSlashingElectra{}, nil
		},
	}
}
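
Lookups against the new map follow the same fork-version dispatch pattern as the existing maps in this file. A hedged sketch of such a lookup; newAttSlashingForFork is a hypothetical helper that is not part of this change and assumes this package's existing imports plus github.com/pkg/errors:

// Hypothetical helper: resolve the concrete attester slashing type registered
// for a fork version. Per the map above it yields AttesterSlashingElectra for
// Electra and Fulu, and the legacy AttesterSlashing for earlier forks.
func newAttSlashingForFork(forkVersion []byte) (ethpb.AttSlashing, error) {
	fn, ok := AttesterSlashingMap[bytesutil.ToBytes4(forkVersion)]
	if !ok {
		return nil, errors.Errorf("no attester slashing type registered for fork version %#x", forkVersion)
	}
	return fn()
}
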
@@ -76,6 +76,13 @@ func TestInitializeDataMaps(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, version.Phase0, agg.Version())
|
||||
}
|
||||
attSlashFunc, ok := AttesterSlashingMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
assert.Equal(t, tt.exists, ok)
|
||||
if tt.exists {
|
||||
attSlash, err := attSlashFunc()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, version.Phase0, attSlash.Version())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
@@ -28,7 +27,7 @@ func TestVerifyConnectivity(t *testing.T) {
|
||||
{"123.123.123.123", 19000, false, "Dialing an unreachable IP: 123.123.123.123:19000"},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(fmt.Sprintf(tc.name),
|
||||
t.Run(tc.name,
|
||||
func(t *testing.T) {
|
||||
verifyConnectivity(tc.address, tc.port, "tcp")
|
||||
logMessage := "IP address is not accessible"
|
||||
|
||||
@@ -633,7 +633,7 @@ func (s *Server) SubmitBLSToExecutionChanges(w http.ResponseWriter, r *http.Requ
|
||||
toBroadcast = append(toBroadcast, sbls)
|
||||
}
|
||||
}
|
||||
go s.broadcastBLSChanges(ctx, toBroadcast)
|
||||
go s.broadcastBLSChanges(context.Background(), toBroadcast)
|
||||
if len(failures) > 0 {
|
||||
failuresErr := &server.IndexedVerificationFailureError{
|
||||
Code: http.StatusBadRequest,
|
||||
|
||||
@@ -126,7 +126,7 @@ func buildExpectedWithdrawalsData(withdrawals []*enginev1.Withdrawal) []*structs

func handleWrapError(err error, message string, code int) *httputil.DefaultJsonError {
	return &httputil.DefaultJsonError{
		Message: errors.Wrapf(err, message).Error(),
		Message: errors.Wrap(err, message).Error(),
		Code:    code,
	}
}
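
The switch from Wrapf to Wrap here is presumably printf hygiene: Wrapf treats its second argument as a format string, so a message containing a stray % would be mangled and a format call with no arguments trips the usual vet/lint checks, while Wrap passes the message through verbatim. A small illustration with github.com/pkg/errors:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	base := errors.New("boom")
	// The lone % is treated as a format verb by Wrapf and mangled in the output.
	fmt.Println(errors.Wrapf(base, "progress 100%")) // progress 100%!(NOVERB): boom
	// Wrap does no formatting and keeps the message as-is.
	fmt.Println(errors.Wrap(base, "progress 100%")) // progress 100%: boom
}
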
@@ -160,6 +160,8 @@ func TestGetSpec(t *testing.T) {
|
||||
config.MaxTransactionsPerPayload = 99
|
||||
config.FieldElementsPerBlob = 100
|
||||
config.KzgCommitmentInclusionProofDepth = 101
|
||||
config.BlobsidecarSubnetCount = 102
|
||||
config.BlobsidecarSubnetCountElectra = 103
|
||||
|
||||
var dbp [4]byte
|
||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||
@@ -198,7 +200,7 @@ func TestGetSpec(t *testing.T) {
|
||||
data, ok := resp.Data.(map[string]interface{})
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
assert.Equal(t, 168, len(data))
|
||||
assert.Equal(t, 170, len(data))
|
||||
for k, v := range data {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
switch k {
|
||||
@@ -559,6 +561,10 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "100", v)
|
||||
case "KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":
|
||||
assert.Equal(t, "101", v)
|
||||
case "BLOB_SIDECAR_SUBNET_COUNT":
|
||||
assert.Equal(t, "102", v)
|
||||
case "BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":
|
||||
assert.Equal(t, "103", v)
|
||||
default:
|
||||
t.Errorf("Incorrect key: %s", k)
|
||||
}
|
||||
|
||||
@@ -190,6 +190,7 @@ func (s *Server) GetForkChoice(w http.ResponseWriter, r *http.Request) {
|
||||
Balance: fmt.Sprintf("%d", n.Balance),
|
||||
ExecutionOptimistic: n.ExecutionOptimistic,
|
||||
TimeStamp: fmt.Sprintf("%d", n.Timestamp),
|
||||
Target: fmt.Sprintf("%#x", n.Target),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,7 +18,8 @@ go_library(
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/payload-attribute:go_default_library",
|
||||
@@ -58,6 +59,7 @@ go_test(
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/payload-attribute:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
@@ -20,7 +21,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
chaintime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
|
||||
@@ -142,7 +144,7 @@ func newTopicRequest(topics []string) (*topicRequest, error) {
|
||||
} else if topicsForOpsFeed[name] {
|
||||
req.needOpsFeed = true
|
||||
} else {
|
||||
return nil, errors.Wrapf(errInvalidTopicName, name)
|
||||
return nil, errors.Wrap(errInvalidTopicName, name)
|
||||
}
|
||||
req.topics[name] = true
|
||||
}
|
||||
@@ -352,9 +354,18 @@ func writeLazyReaderWithRecover(w *streamingResponseWriterController, lr lazyRea
|
||||
if r := recover(); r != nil {
|
||||
log.WithField("panic", r).Error("Recovered from panic while writing event to client.")
|
||||
err = errWriterUnusable
|
||||
debug.PrintStack()
|
||||
}
|
||||
}()
|
||||
if lr == nil {
|
||||
log.Warn("Event stream skipping a nil lazy event reader callback")
|
||||
return nil
|
||||
}
|
||||
r := lr()
|
||||
if r == nil {
|
||||
log.Warn("Event stream skipping a nil event reader")
|
||||
return nil
|
||||
}
|
||||
out, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -600,27 +611,14 @@ func (s *Server) lazyReaderForEvent(ctx context.Context, event *feed.Event, topi
|
||||
|
||||
var errUnsupportedPayloadAttribute = errors.New("cannot compute payload attributes pre-Bellatrix")
|
||||
|
||||
func (s *Server) computePayloadAttributes(ctx context.Context, ev payloadattribute.EventData) (payloadattribute.Attributer, error) {
|
||||
v := ev.HeadState.Version()
|
||||
func (s *Server) computePayloadAttributes(ctx context.Context, st state.ReadOnlyBeaconState, root [32]byte, proposer primitives.ValidatorIndex, timestamp uint64, randao []byte) (payloadattribute.Attributer, error) {
|
||||
v := st.Version()
|
||||
if v < version.Bellatrix {
|
||||
return nil, errors.Wrapf(errUnsupportedPayloadAttribute, "%s is not supported", version.String(v))
|
||||
}
|
||||
|
||||
t, err := slots.ToTime(ev.HeadState.GenesisTime(), ev.HeadState.Slot())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get head state slot time")
|
||||
}
|
||||
timestamp := uint64(t.Unix())
|
||||
prevRando, err := helpers.RandaoMix(ev.HeadState, chaintime.CurrentEpoch(ev.HeadState))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get head state randao mix")
|
||||
}
|
||||
proposerIndex, err := helpers.BeaconProposerIndex(ctx, ev.HeadState)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get head state proposer index")
|
||||
}
|
||||
feeRecpt := params.BeaconConfig().DefaultFeeRecipient.Bytes()
|
||||
tValidator, exists := s.TrackedValidatorsCache.Validator(proposerIndex)
|
||||
tValidator, exists := s.TrackedValidatorsCache.Validator(proposer)
|
||||
if exists {
|
||||
feeRecpt = tValidator.FeeRecipient[:]
|
||||
}
|
||||
@@ -628,34 +626,30 @@ func (s *Server) computePayloadAttributes(ctx context.Context, ev payloadattribu
|
||||
if v == version.Bellatrix {
|
||||
return payloadattribute.New(&engine.PayloadAttributes{
|
||||
Timestamp: timestamp,
|
||||
PrevRandao: prevRando,
|
||||
PrevRandao: randao,
|
||||
SuggestedFeeRecipient: feeRecpt,
|
||||
})
|
||||
}
|
||||
|
||||
w, _, err := ev.HeadState.ExpectedWithdrawals()
|
||||
w, _, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get withdrawals from head state")
|
||||
}
|
||||
if v == version.Capella {
|
||||
return payloadattribute.New(&engine.PayloadAttributesV2{
|
||||
Timestamp: timestamp,
|
||||
PrevRandao: prevRando,
|
||||
PrevRandao: randao,
|
||||
SuggestedFeeRecipient: feeRecpt,
|
||||
Withdrawals: w,
|
||||
})
|
||||
}
|
||||
|
||||
pr, err := ev.HeadBlock.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute head block root")
|
||||
}
|
||||
return payloadattribute.New(&engine.PayloadAttributesV3{
|
||||
Timestamp: timestamp,
|
||||
PrevRandao: prevRando,
|
||||
PrevRandao: randao,
|
||||
SuggestedFeeRecipient: feeRecpt,
|
||||
Withdrawals: w,
|
||||
ParentBeaconBlockRoot: pr[:],
|
||||
ParentBeaconBlockRoot: root[:],
|
||||
})
|
||||
}
|
||||
|
||||
@@ -665,37 +659,75 @@ type asyncPayloadAttrData struct {
|
||||
err error
|
||||
}
|
||||
|
||||
var zeroRoot [32]byte
|
||||
|
||||
// needsFill allows tests to provide filled EventData values. An ordinary event data value fired by the blockchain package will have
|
||||
// all of the checked fields empty, so the logical short circuit should hit immediately.
|
||||
func needsFill(ev payloadattribute.EventData) bool {
|
||||
return ev.HeadState == nil || ev.HeadState.IsNil() || ev.HeadState.LatestBlockHeader() == nil ||
|
||||
ev.HeadBlock == nil || ev.HeadBlock.IsNil() ||
|
||||
ev.HeadRoot == zeroRoot || len(ev.ParentBlockRoot) == 0 || len(ev.ParentBlockHash) == 0 ||
|
||||
ev.Attributer == nil || ev.Attributer.IsEmpty()
|
||||
}
|
||||
|
||||
func (s *Server) fillEventData(ctx context.Context, ev payloadattribute.EventData) (payloadattribute.EventData, error) {
|
||||
if ev.HeadBlock == nil || ev.HeadBlock.IsNil() {
|
||||
hb, err := s.HeadFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return ev, errors.Wrap(err, "Could not look up head block")
|
||||
}
|
||||
root, err := hb.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return ev, errors.Wrap(err, "Could not compute head block root")
|
||||
}
|
||||
if ev.HeadRoot != root {
|
||||
return ev, errors.Wrap(err, "head root changed before payload attribute event handler execution")
|
||||
}
|
||||
ev.HeadBlock = hb
|
||||
payload, err := hb.Block().Body().Execution()
|
||||
if err != nil {
|
||||
return ev, errors.Wrap(err, "Could not get execution payload for head block")
|
||||
}
|
||||
ev.ParentBlockHash = payload.BlockHash()
|
||||
ev.ParentBlockNumber = payload.BlockNumber()
|
||||
var err error
|
||||
|
||||
if !needsFill(ev) {
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
attr := ev.Attributer
|
||||
if attr == nil || attr.IsEmpty() {
|
||||
attr, err := s.computePayloadAttributes(ctx, ev)
|
||||
if err != nil {
|
||||
return ev, errors.Wrap(err, "Could not compute payload attributes")
|
||||
}
|
||||
ev.Attributer = attr
|
||||
ev.HeadState, err = s.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return ev, errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
return ev, nil
|
||||
|
||||
ev.HeadBlock, err = s.HeadFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return ev, errors.Wrap(err, "could not look up head block")
|
||||
}
|
||||
ev.HeadRoot, err = ev.HeadBlock.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return ev, errors.Wrap(err, "could not compute head block root")
|
||||
}
|
||||
pr := ev.HeadBlock.Block().ParentRoot()
|
||||
ev.ParentBlockRoot = pr[:]
|
||||
|
||||
hsr, err := ev.HeadState.LatestBlockHeader().HashTreeRoot()
|
||||
if err != nil {
|
||||
return ev, errors.Wrap(err, "could not compute latest block header root")
|
||||
}
|
||||
|
||||
pse := slots.ToEpoch(ev.ProposalSlot)
|
||||
st := ev.HeadState
|
||||
if slots.ToEpoch(st.Slot()) != pse {
|
||||
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, hsr[:], ev.ProposalSlot)
|
||||
if err != nil {
|
||||
return ev, errors.Wrap(err, "could not run process blocks on head state into the proposal slot epoch")
|
||||
}
|
||||
}
|
||||
ev.ProposerIndex, err = helpers.BeaconProposerIndexAtSlot(ctx, st, ev.ProposalSlot)
|
||||
if err != nil {
|
||||
return ev, errors.Wrap(err, "failed to compute proposer index")
|
||||
}
|
||||
randao, err := helpers.RandaoMix(st, pse)
|
||||
if err != nil {
|
||||
return ev, errors.Wrap(err, "could not get head state randado")
|
||||
}
|
||||
|
||||
payload, err := ev.HeadBlock.Block().Body().Execution()
|
||||
if err != nil {
|
||||
return ev, errors.Wrap(err, "could not get execution payload for head block")
|
||||
}
|
||||
ev.ParentBlockHash = payload.BlockHash()
|
||||
ev.ParentBlockNumber = payload.BlockNumber()
|
||||
|
||||
t, err := slots.ToTime(st.GenesisTime(), ev.ProposalSlot)
|
||||
if err != nil {
|
||||
return ev, errors.Wrap(err, "could not get head state slot time")
|
||||
}
|
||||
ev.Attributer, err = s.computePayloadAttributes(ctx, st, hsr, ev.ProposerIndex, uint64(t.Unix()), randao)
|
||||
return ev, err
|
||||
}
|
||||
|
||||
// This event stream is intended to be used by builders and relays.
|
||||
@@ -704,10 +736,7 @@ func (s *Server) payloadAttributesReader(ctx context.Context, ev payloadattribut
|
||||
ctx, cancel := context.WithTimeout(ctx, payloadAttributeTimeout)
|
||||
edc := make(chan asyncPayloadAttrData)
|
||||
go func() {
|
||||
d := asyncPayloadAttrData{
|
||||
version: version.String(ev.HeadState.Version()),
|
||||
}
|
||||
|
||||
d := asyncPayloadAttrData{}
|
||||
defer func() {
|
||||
edc <- d
|
||||
}()
|
||||
@@ -716,6 +745,7 @@ func (s *Server) payloadAttributesReader(ctx context.Context, ev payloadattribut
|
||||
d.err = errors.Wrap(err, "Could not fill event data")
|
||||
return
|
||||
}
|
||||
d.version = version.String(ev.HeadBlock.Version())
|
||||
attributesBytes, err := marshalAttributes(ev.Attributer)
|
||||
if err != nil {
|
||||
d.err = errors.Wrap(err, "errors marshaling payload attributes to json")
|
||||
|
||||
@@ -2,6 +2,7 @@ package events
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
@@ -24,6 +25,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
@@ -557,6 +559,110 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestFillEventData(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
t.Run("AlreadyFilledData_ShouldShortCircuitWithoutError", func(t *testing.T) {
|
||||
st, err := util.NewBeaconStateBellatrix()
|
||||
require.NoError(t, err)
|
||||
b, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&eth.SignedBeaconBlockBellatrix{}))
|
||||
require.NoError(t, err)
|
||||
attributor, err := payloadattribute.New(&enginev1.PayloadAttributes{
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
alreadyFilled := payloadattribute.EventData{
|
||||
HeadState: st,
|
||||
HeadBlock: b,
|
||||
HeadRoot: [32]byte{1, 2, 3},
|
||||
Attributer: attributor,
|
||||
ParentBlockRoot: []byte{1, 2, 3},
|
||||
ParentBlockHash: []byte{4, 5, 6},
|
||||
}
|
||||
srv := &Server{} // No real HeadFetcher needed here since it won't be called.
|
||||
result, err := srv.fillEventData(ctx, alreadyFilled)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, alreadyFilled, result)
|
||||
})
|
||||
t.Run("Electra PartialData_ShouldFetchHeadStateAndBlock", func(t *testing.T) {
|
||||
st, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
valCount := 10
|
||||
setActiveValidators(t, st, valCount)
|
||||
inactivityScores := make([]uint64, valCount)
|
||||
for i := range inactivityScores {
|
||||
inactivityScores[i] = 10
|
||||
}
|
||||
require.NoError(t, st.SetInactivityScores(inactivityScores))
|
||||
b, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockElectra(&eth.SignedBeaconBlockElectra{}))
|
||||
require.NoError(t, err)
|
||||
attributor, err := payloadattribute.New(&enginev1.PayloadAttributes{
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// Create an event data object missing certain fields:
|
||||
partial := payloadattribute.EventData{
|
||||
// The presence of a nil HeadState, nil HeadBlock, zeroed HeadRoot, etc.
|
||||
// will cause fillEventData to try to fill the values.
|
||||
ProposalSlot: 42, // different epoch from current slot
|
||||
Attributer: attributor, // Must be Bellatrix or later
|
||||
}
|
||||
currentSlot := primitives.Slot(0)
|
||||
// to avoid slot processing
|
||||
require.NoError(t, st.SetSlot(currentSlot+1))
|
||||
mockChainService := &mockChain.ChainService{
|
||||
Root: make([]byte, 32),
|
||||
State: st,
|
||||
Block: b,
|
||||
Slot: ¤tSlot,
|
||||
}
|
||||
|
||||
stn := mockChain.NewEventFeedWrapper()
|
||||
opn := mockChain.NewEventFeedWrapper()
|
||||
srv := &Server{
|
||||
StateNotifier: &mockChain.SimpleNotifier{Feed: stn},
|
||||
OperationNotifier: &mockChain.SimpleNotifier{Feed: opn},
|
||||
HeadFetcher: mockChainService,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
|
||||
EventWriteTimeout: testEventWriteTimeout,
|
||||
}
|
||||
|
||||
filled, err := srv.fillEventData(ctx, partial)
|
||||
require.NoError(t, err, "expected successful fill of partial event data")
|
||||
|
||||
// Verify that fields have been updated from the mock data:
|
||||
require.NotNil(t, filled.HeadState, "HeadState should be assigned")
|
||||
require.NotNil(t, filled.HeadBlock, "HeadBlock should be assigned")
|
||||
require.NotEqual(t, [32]byte{}, filled.HeadRoot, "HeadRoot should no longer be zero")
|
||||
require.NotEmpty(t, filled.ParentBlockRoot, "ParentBlockRoot should be filled")
|
||||
require.NotEmpty(t, filled.ParentBlockHash, "ParentBlockHash should be filled")
|
||||
require.Equal(t, uint64(0), filled.ParentBlockNumber, "ParentBlockNumber must match mock block")
|
||||
|
||||
// Check that a valid Attributer was set:
|
||||
require.NotNil(t, filled.Attributer, "Should have a valid payload attributes object")
|
||||
require.Equal(t, false, filled.Attributer.IsEmpty(), "Attributer should not be empty after fill")
|
||||
})
|
||||
}
|
||||
|
||||
func setActiveValidators(t *testing.T, st state.BeaconState, count int) {
|
||||
balances := make([]uint64, count)
|
||||
validators := make([]*eth.Validator, 0, count)
|
||||
for i := 0; i < count; i++ {
|
||||
pubKey := make([]byte, params.BeaconConfig().BLSPubkeyLength)
|
||||
binary.LittleEndian.PutUint64(pubKey, uint64(i))
|
||||
balances[i] = uint64(i)
|
||||
validators = append(validators, ð.Validator{
|
||||
PublicKey: pubKey,
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
})
|
||||
}
|
||||
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
}
|
||||
|
||||
func TestStuckReaderScenarios(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
|
||||
@@ -4,7 +4,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"handlers.go",
|
||||
"helpers.go",
|
||||
"server.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/light-client",
|
||||
@@ -17,11 +16,9 @@ go_library(
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//network/httputil:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -182,18 +183,31 @@ func (s *Server) GetLightClientFinalityUpdate(w http.ResponseWriter, req *http.R
return
}

update, err := newLightClientFinalityUpdateFromBeaconState(ctx, s.ChainInfoFetcher.CurrentSlot(), st, block, attestedState, attestedBlock, finalizedBlock)
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, s.ChainInfoFetcher.CurrentSlot(), st, block, attestedState, attestedBlock, finalizedBlock)
if err != nil {
httputil.HandleError(w, "Could not get light client finality update: "+err.Error(), http.StatusInternalServerError)
return
}

response := &structs.LightClientFinalityUpdateResponse{
Version: version.String(attestedState.Version()),
Data: update,
if httputil.RespondWithSsz(req) {
ssz, err := update.MarshalSSZ()
if err != nil {
httputil.HandleError(w, "Could not marshal finality update to SSZ: "+err.Error(), http.StatusInternalServerError)
return
}
httputil.WriteSsz(w, ssz, "light_client_finality_update.ssz")
} else {
updateStruct, err := structs.LightClientFinalityUpdateFromConsensus(update)
if err != nil {
httputil.HandleError(w, "Could not convert light client finality update to API struct: "+err.Error(), http.StatusInternalServerError)
return
}
response := &structs.LightClientFinalityUpdateResponse{
Version: version.String(attestedState.Version()),
Data: updateStruct,
}
httputil.WriteJson(w, response)
}

httputil.WriteJson(w, response)
}

// GetLightClientOptimisticUpdate - implements https://github.com/ethereum/beacon-APIs/blob/263f4ed6c263c967f13279c7a9f5629b51c5fc55/apis/beacon/light_client/optimistic_update.yaml
@@ -232,18 +246,31 @@ func (s *Server) GetLightClientOptimisticUpdate(w http.ResponseWriter, req *http
return
}

update, err := newLightClientOptimisticUpdateFromBeaconState(ctx, s.ChainInfoFetcher.CurrentSlot(), st, block, attestedState, attestedBlock)
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, s.ChainInfoFetcher.CurrentSlot(), st, block, attestedState, attestedBlock)
if err != nil {
httputil.HandleError(w, "Could not get light client optimistic update: "+err.Error(), http.StatusInternalServerError)
return
}

response := &structs.LightClientOptimisticUpdateResponse{
Version: version.String(attestedState.Version()),
Data: update,
if httputil.RespondWithSsz(req) {
ssz, err := update.MarshalSSZ()
if err != nil {
httputil.HandleError(w, "Could not marshal optimistic update to SSZ: "+err.Error(), http.StatusInternalServerError)
return
}
httputil.WriteSsz(w, ssz, "light_client_optimistic_update.ssz")
} else {
updateStruct, err := structs.LightClientOptimisticUpdateFromConsensus(update)
if err != nil {
httputil.HandleError(w, "Could not convert light client optimistic update to API struct: "+err.Error(), http.StatusInternalServerError)
return
}
response := &structs.LightClientOptimisticUpdateResponse{
Version: version.String(attestedState.Version()),
Data: updateStruct,
}
httputil.WriteJson(w, response)
}

httputil.WriteJson(w, response)
}

// suitableBlock returns the latest block that satisfies all criteria required for creating a new update

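The handler changes above branch on the client's Accept header: raw SSZ bytes for octet-stream requests, the JSON API struct otherwise. A rough, self-contained sketch of that negotiation using only the standard library (the update type and the simplified Accept check are assumptions for illustration; the real code uses the httputil helpers shown above):

package sketch

import (
	"encoding/json"
	"net/http"
)

// update stands in for a light client update; MarshalSSZ is assumed here.
type update struct{ Slot uint64 }

func (u update) MarshalSSZ() ([]byte, error) { return []byte{byte(u.Slot)}, nil }

// serveUpdate mirrors the Accept-header branching used above:
// binary SSZ for octet-stream clients, JSON for everyone else.
func serveUpdate(w http.ResponseWriter, r *http.Request, u update) {
	// Simplified check; a real helper would parse the Accept header properly.
	if r.Header.Get("Accept") == "application/octet-stream" {
		ssz, err := u.MarshalSSZ()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/octet-stream")
		_, _ = w.Write(ssz)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(u)
}
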
@@ -1105,116 +1105,226 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {
|
||||
EnableLightClient: true,
|
||||
})
|
||||
defer resetFn()
|
||||
|
||||
helpers.ClearCache()
|
||||
ctx := context.Background()
|
||||
config := params.BeaconConfig()
|
||||
slot := primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
|
||||
|
||||
attestedState, err := util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
err = attestedState.SetSlot(slot.Sub(1))
|
||||
require.NoError(t, err)
|
||||
t.Run("altair", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
slot := primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
|
||||
|
||||
require.NoError(t, attestedState.SetFinalizedCheckpoint(&pb.Checkpoint{
|
||||
Epoch: config.AltairForkEpoch - 10,
|
||||
Root: make([]byte, 32),
|
||||
}))
|
||||
attestedState, err := util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
err = attestedState.SetSlot(slot.Sub(1))
|
||||
require.NoError(t, err)
|
||||
|
||||
parent := util.NewBeaconBlockAltair()
|
||||
parent.Block.Slot = slot.Sub(1)
|
||||
require.NoError(t, attestedState.SetFinalizedCheckpoint(&pb.Checkpoint{
|
||||
Epoch: config.AltairForkEpoch - 10,
|
||||
Root: make([]byte, 32),
|
||||
}))
|
||||
|
||||
signedParent, err := blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(t, err)
|
||||
parent := util.NewBeaconBlockAltair()
|
||||
parent.Block.Slot = slot.Sub(1)
|
||||
|
||||
parentHeader, err := signedParent.Header()
|
||||
require.NoError(t, err)
|
||||
attestedHeader := parentHeader.Header
|
||||
signedParent, err := blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = attestedState.SetLatestBlockHeader(attestedHeader)
|
||||
require.NoError(t, err)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
parentHeader, err := signedParent.Header()
|
||||
require.NoError(t, err)
|
||||
attestedHeader := parentHeader.Header
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
parent.Block.StateRoot = attestedStateRoot[:]
|
||||
signedParent, err = blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(t, err)
|
||||
err = attestedState.SetLatestBlockHeader(attestedHeader)
|
||||
require.NoError(t, err)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, err := util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
err = st.SetSlot(slot)
|
||||
require.NoError(t, err)
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
parent.Block.StateRoot = attestedStateRoot[:]
|
||||
signedParent, err = blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(t, err)
|
||||
|
||||
parentRoot, err := signedParent.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
st, err := util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
err = st.SetSlot(slot)
|
||||
require.NoError(t, err)
|
||||
|
||||
block := util.NewBeaconBlockAltair()
|
||||
block.Block.Slot = slot
|
||||
block.Block.ParentRoot = parentRoot[:]
|
||||
parentRoot, err := signedParent.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := uint64(0); i < config.SyncCommitteeSize; i++ {
|
||||
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
|
||||
}
|
||||
block := util.NewBeaconBlockAltair()
|
||||
block.Block.Slot = slot
|
||||
block.Block.ParentRoot = parentRoot[:]
|
||||
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
for i := uint64(0); i < config.SyncCommitteeSize; i++ {
|
||||
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
|
||||
}
|
||||
|
||||
h, err := signedBlock.Header()
|
||||
require.NoError(t, err)
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = st.SetLatestBlockHeader(h.Header)
|
||||
require.NoError(t, err)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
h, err := signedBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
block.Block.StateRoot = stateRoot[:]
|
||||
signedBlock, err = blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
err = st.SetLatestBlockHeader(h.Header)
|
||||
require.NoError(t, err)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
root, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
block.Block.StateRoot = stateRoot[:]
|
||||
signedBlock, err = blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockBlocker := &testutil.MockBlocker{
|
||||
RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{
|
||||
parentRoot: signedParent,
|
||||
root: signedBlock,
|
||||
},
|
||||
SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
|
||||
slot.Sub(1): signedParent,
|
||||
slot: signedBlock,
|
||||
},
|
||||
}
|
||||
mockChainService := &mock.ChainService{Optimistic: true, Slot: &slot, State: st, FinalizedRoots: map[[32]byte]bool{
|
||||
root: true,
|
||||
}}
|
||||
mockChainInfoFetcher := &mock.ChainService{Slot: &slot}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
|
||||
slot.Sub(1): attestedState,
|
||||
slot: st,
|
||||
}},
|
||||
Blocker: mockBlocker,
|
||||
HeadFetcher: mockChainService,
|
||||
ChainInfoFetcher: mockChainInfoFetcher,
|
||||
}
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
root, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
s.GetLightClientFinalityUpdate(writer, request)
|
||||
mockBlocker := &testutil.MockBlocker{
|
||||
RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{
|
||||
parentRoot: signedParent,
|
||||
root: signedBlock,
|
||||
},
|
||||
SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
|
||||
slot.Sub(1): signedParent,
|
||||
slot: signedBlock,
|
||||
},
|
||||
}
|
||||
mockChainService := &mock.ChainService{Optimistic: true, Slot: &slot, State: st, FinalizedRoots: map[[32]byte]bool{
|
||||
root: true,
|
||||
}}
|
||||
mockChainInfoFetcher := &mock.ChainService{Slot: &slot}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
|
||||
slot.Sub(1): attestedState,
|
||||
slot: st,
|
||||
}},
|
||||
Blocker: mockBlocker,
|
||||
HeadFetcher: mockChainService,
|
||||
ChainInfoFetcher: mockChainInfoFetcher,
|
||||
}
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
var resp *structs.LightClientUpdateResponse
|
||||
err = json.Unmarshal(writer.Body.Bytes(), &resp)
|
||||
require.NoError(t, err)
|
||||
var respHeader structs.LightClientHeader
|
||||
err = json.Unmarshal(resp.Data.AttestedHeader, &respHeader)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "altair", resp.Version)
|
||||
require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), respHeader.Beacon.BodyRoot)
|
||||
require.NotNil(t, resp.Data)
|
||||
s.GetLightClientFinalityUpdate(writer, request)
|
||||
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
var resp *structs.LightClientUpdateResponse
|
||||
err = json.Unmarshal(writer.Body.Bytes(), &resp)
|
||||
require.NoError(t, err)
|
||||
var respHeader structs.LightClientHeader
|
||||
err = json.Unmarshal(resp.Data.AttestedHeader, &respHeader)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "altair", resp.Version)
|
||||
require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), respHeader.Beacon.BodyRoot)
|
||||
require.NotNil(t, resp.Data)
|
||||
})
|
||||
|
||||
t.Run("altair SSZ", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
slot := primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
|
||||
|
||||
attestedState, err := util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
err = attestedState.SetSlot(slot.Sub(1))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, attestedState.SetFinalizedCheckpoint(&pb.Checkpoint{
|
||||
Epoch: config.AltairForkEpoch - 10,
|
||||
Root: make([]byte, 32),
|
||||
}))
|
||||
|
||||
parent := util.NewBeaconBlockAltair()
|
||||
parent.Block.Slot = slot.Sub(1)
|
||||
|
||||
signedParent, err := blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(t, err)
|
||||
|
||||
parentHeader, err := signedParent.Header()
|
||||
require.NoError(t, err)
|
||||
attestedHeader := parentHeader.Header
|
||||
|
||||
err = attestedState.SetLatestBlockHeader(attestedHeader)
|
||||
require.NoError(t, err)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
parent.Block.StateRoot = attestedStateRoot[:]
|
||||
signedParent, err = blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, err := util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
err = st.SetSlot(slot)
|
||||
require.NoError(t, err)
|
||||
|
||||
parentRoot, err := signedParent.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
block := util.NewBeaconBlockAltair()
|
||||
block.Block.Slot = slot
|
||||
block.Block.ParentRoot = parentRoot[:]
|
||||
|
||||
for i := uint64(0); i < config.SyncCommitteeSize; i++ {
|
||||
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
|
||||
}
|
||||
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
h, err := signedBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = st.SetLatestBlockHeader(h.Header)
|
||||
require.NoError(t, err)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
block.Block.StateRoot = stateRoot[:]
|
||||
signedBlock, err = blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
root, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockBlocker := &testutil.MockBlocker{
|
||||
RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{
|
||||
parentRoot: signedParent,
|
||||
root: signedBlock,
|
||||
},
|
||||
SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
|
||||
slot.Sub(1): signedParent,
|
||||
slot: signedBlock,
|
||||
},
|
||||
}
|
||||
mockChainService := &mock.ChainService{Optimistic: true, Slot: &slot, State: st, FinalizedRoots: map[[32]byte]bool{
|
||||
root: true,
|
||||
}}
|
||||
mockChainInfoFetcher := &mock.ChainService{Slot: &slot}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
|
||||
slot.Sub(1): attestedState,
|
||||
slot: st,
|
||||
}},
|
||||
Blocker: mockBlocker,
|
||||
HeadFetcher: mockChainService,
|
||||
ChainInfoFetcher: mockChainInfoFetcher,
|
||||
}
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetLightClientFinalityUpdate(writer, request)
|
||||
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
|
||||
var resp pb.LightClientFinalityUpdateAltair
|
||||
err = resp.UnmarshalSSZ(writer.Body.Bytes())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedHeader.Slot, resp.AttestedHeader.Beacon.Slot)
|
||||
require.DeepEqual(t, attestedHeader.BodyRoot, resp.AttestedHeader.Beacon.BodyRoot)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
|
||||
@@ -1335,6 +1445,114 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
|
||||
require.NotNil(t, resp.Data)
|
||||
})
|
||||
|
||||
t.Run("altair SSZ", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
slot := primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
|
||||
|
||||
attestedState, err := util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
err = attestedState.SetSlot(slot.Sub(1))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, attestedState.SetFinalizedCheckpoint(&pb.Checkpoint{
|
||||
Epoch: config.AltairForkEpoch - 10,
|
||||
Root: make([]byte, 32),
|
||||
}))
|
||||
|
||||
parent := util.NewBeaconBlockAltair()
|
||||
parent.Block.Slot = slot.Sub(1)
|
||||
|
||||
signedParent, err := blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(t, err)
|
||||
|
||||
parentHeader, err := signedParent.Header()
|
||||
require.NoError(t, err)
|
||||
attestedHeader := parentHeader.Header
|
||||
|
||||
err = attestedState.SetLatestBlockHeader(attestedHeader)
|
||||
require.NoError(t, err)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
parent.Block.StateRoot = attestedStateRoot[:]
|
||||
signedParent, err = blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, err := util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
err = st.SetSlot(slot)
|
||||
require.NoError(t, err)
|
||||
|
||||
parentRoot, err := signedParent.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
block := util.NewBeaconBlockAltair()
|
||||
block.Block.Slot = slot
|
||||
block.Block.ParentRoot = parentRoot[:]
|
||||
|
||||
for i := uint64(0); i < config.SyncCommitteeSize; i++ {
|
||||
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
|
||||
}
|
||||
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
h, err := signedBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = st.SetLatestBlockHeader(h.Header)
|
||||
require.NoError(t, err)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
block.Block.StateRoot = stateRoot[:]
|
||||
signedBlock, err = blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
root, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockBlocker := &testutil.MockBlocker{
|
||||
RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{
|
||||
parentRoot: signedParent,
|
||||
root: signedBlock,
|
||||
},
|
||||
SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
|
||||
slot.Sub(1): signedParent,
|
||||
slot: signedBlock,
|
||||
},
|
||||
}
|
||||
mockChainService := &mock.ChainService{Optimistic: true, Slot: &slot, State: st, FinalizedRoots: map[[32]byte]bool{
|
||||
root: true,
|
||||
}}
|
||||
mockChainInfoFetcher := &mock.ChainService{Slot: &slot}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
|
||||
slot.Sub(1): attestedState,
|
||||
slot: st,
|
||||
}},
|
||||
Blocker: mockBlocker,
|
||||
HeadFetcher: mockChainService,
|
||||
ChainInfoFetcher: mockChainInfoFetcher,
|
||||
}
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetLightClientOptimisticUpdate(writer, request)
|
||||
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
|
||||
var resp pb.LightClientOptimisticUpdateAltair
|
||||
err = resp.UnmarshalSSZ(writer.Body.Bytes())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, resp.AttestedHeader.Beacon.Slot, attestedHeader.Slot)
|
||||
require.DeepEqual(t, resp.AttestedHeader.Beacon.BodyRoot, attestedHeader.BodyRoot)
|
||||
})
|
||||
|
||||
t.Run("capella", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
slot := primitives.Slot(config.CapellaForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
|
||||
@@ -1445,6 +1663,114 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
|
||||
require.NotNil(t, resp.Data)
|
||||
})
|
||||
|
||||
t.Run("capella SSZ", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
slot := primitives.Slot(config.CapellaForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
|
||||
|
||||
attestedState, err := util.NewBeaconStateCapella()
|
||||
require.NoError(t, err)
|
||||
err = attestedState.SetSlot(slot.Sub(1))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, attestedState.SetFinalizedCheckpoint(&pb.Checkpoint{
|
||||
Epoch: config.AltairForkEpoch - 10,
|
||||
Root: make([]byte, 32),
|
||||
}))
|
||||
|
||||
parent := util.NewBeaconBlockCapella()
|
||||
parent.Block.Slot = slot.Sub(1)
|
||||
|
||||
signedParent, err := blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(t, err)
|
||||
|
||||
parentHeader, err := signedParent.Header()
|
||||
require.NoError(t, err)
|
||||
attestedHeader := parentHeader.Header
|
||||
|
||||
err = attestedState.SetLatestBlockHeader(attestedHeader)
|
||||
require.NoError(t, err)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
parent.Block.StateRoot = attestedStateRoot[:]
|
||||
signedParent, err = blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, err := util.NewBeaconStateCapella()
|
||||
require.NoError(t, err)
|
||||
err = st.SetSlot(slot)
|
||||
require.NoError(t, err)
|
||||
|
||||
parentRoot, err := signedParent.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
block := util.NewBeaconBlockCapella()
|
||||
block.Block.Slot = slot
|
||||
block.Block.ParentRoot = parentRoot[:]
|
||||
|
||||
for i := uint64(0); i < config.SyncCommitteeSize; i++ {
|
||||
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
|
||||
}
|
||||
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
h, err := signedBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = st.SetLatestBlockHeader(h.Header)
|
||||
require.NoError(t, err)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
block.Block.StateRoot = stateRoot[:]
|
||||
signedBlock, err = blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
root, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockBlocker := &testutil.MockBlocker{
|
||||
RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{
|
||||
parentRoot: signedParent,
|
||||
root: signedBlock,
|
||||
},
|
||||
SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
|
||||
slot.Sub(1): signedParent,
|
||||
slot: signedBlock,
|
||||
},
|
||||
}
|
||||
mockChainService := &mock.ChainService{Optimistic: true, Slot: &slot, State: st, FinalizedRoots: map[[32]byte]bool{
|
||||
root: true,
|
||||
}}
|
||||
mockChainInfoFetcher := &mock.ChainService{Slot: &slot}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
|
||||
slot.Sub(1): attestedState,
|
||||
slot: st,
|
||||
}},
|
||||
Blocker: mockBlocker,
|
||||
HeadFetcher: mockChainService,
|
||||
ChainInfoFetcher: mockChainInfoFetcher,
|
||||
}
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetLightClientOptimisticUpdate(writer, request)
|
||||
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
|
||||
var resp pb.LightClientOptimisticUpdateCapella
|
||||
err = resp.UnmarshalSSZ(writer.Body.Bytes())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, resp.AttestedHeader.Beacon.Slot, attestedHeader.Slot)
|
||||
require.DeepEqual(t, resp.AttestedHeader.Beacon.BodyRoot, attestedHeader.BodyRoot)
|
||||
})
|
||||
|
||||
t.Run("deneb", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
slot := primitives.Slot(config.DenebForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
|
||||
@@ -1554,6 +1880,114 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
|
||||
require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), respHeader.Beacon.BodyRoot)
|
||||
require.NotNil(t, resp.Data)
|
||||
})
|
||||
|
||||
t.Run("deneb SSZ", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
slot := primitives.Slot(config.DenebForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
|
||||
|
||||
attestedState, err := util.NewBeaconStateDeneb()
|
||||
require.NoError(t, err)
|
||||
err = attestedState.SetSlot(slot.Sub(1))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, attestedState.SetFinalizedCheckpoint(&pb.Checkpoint{
|
||||
Epoch: config.AltairForkEpoch - 10,
|
||||
Root: make([]byte, 32),
|
||||
}))
|
||||
|
||||
parent := util.NewBeaconBlockDeneb()
|
||||
parent.Block.Slot = slot.Sub(1)
|
||||
|
||||
signedParent, err := blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(t, err)
|
||||
|
||||
parentHeader, err := signedParent.Header()
|
||||
require.NoError(t, err)
|
||||
attestedHeader := parentHeader.Header
|
||||
|
||||
err = attestedState.SetLatestBlockHeader(attestedHeader)
|
||||
require.NoError(t, err)
|
||||
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
parent.Block.StateRoot = attestedStateRoot[:]
|
||||
signedParent, err = blocks.NewSignedBeaconBlock(parent)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, err := util.NewBeaconStateDeneb()
|
||||
require.NoError(t, err)
|
||||
err = st.SetSlot(slot)
|
||||
require.NoError(t, err)
|
||||
|
||||
parentRoot, err := signedParent.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
block := util.NewBeaconBlockDeneb()
|
||||
block.Block.Slot = slot
|
||||
block.Block.ParentRoot = parentRoot[:]
|
||||
|
||||
for i := uint64(0); i < config.SyncCommitteeSize; i++ {
|
||||
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
|
||||
}
|
||||
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
h, err := signedBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = st.SetLatestBlockHeader(h.Header)
|
||||
require.NoError(t, err)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// get a new signed block so the root is updated with the new state root
|
||||
block.Block.StateRoot = stateRoot[:]
|
||||
signedBlock, err = blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
root, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockBlocker := &testutil.MockBlocker{
|
||||
RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{
|
||||
parentRoot: signedParent,
|
||||
root: signedBlock,
|
||||
},
|
||||
SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
|
||||
slot.Sub(1): signedParent,
|
||||
slot: signedBlock,
|
||||
},
|
||||
}
|
||||
mockChainService := &mock.ChainService{Optimistic: true, Slot: &slot, State: st, FinalizedRoots: map[[32]byte]bool{
|
||||
root: true,
|
||||
}}
|
||||
mockChainInfoFetcher := &mock.ChainService{Slot: &slot}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
|
||||
slot.Sub(1): attestedState,
|
||||
slot: st,
|
||||
}},
|
||||
Blocker: mockBlocker,
|
||||
HeadFetcher: mockChainService,
|
||||
ChainInfoFetcher: mockChainInfoFetcher,
|
||||
}
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetLightClientOptimisticUpdate(writer, request)
|
||||
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
|
||||
var resp pb.LightClientOptimisticUpdateDeneb
|
||||
err = resp.UnmarshalSSZ(writer.Body.Bytes())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, resp.AttestedHeader.Beacon.Slot, attestedHeader.Slot)
|
||||
require.DeepEqual(t, resp.AttestedHeader.Beacon.BodyRoot, attestedHeader.BodyRoot)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLightClientHandler_GetLightClientEventBlock(t *testing.T) {
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
package lightclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
)
|
||||
|
||||
func newLightClientFinalityUpdateFromBeaconState(
|
||||
ctx context.Context,
|
||||
currentSlot primitives.Slot,
|
||||
state state.BeaconState,
|
||||
block interfaces.ReadOnlySignedBeaconBlock,
|
||||
attestedState state.BeaconState,
|
||||
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
finalizedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
) (*structs.LightClientFinalityUpdate, error) {
|
||||
result, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, currentSlot, state, block, attestedState, attestedBlock, finalizedBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return structs.LightClientFinalityUpdateFromConsensus(result)
|
||||
}
|
||||
|
||||
func newLightClientOptimisticUpdateFromBeaconState(
|
||||
ctx context.Context,
|
||||
currentSlot primitives.Slot,
|
||||
state state.BeaconState,
|
||||
block interfaces.ReadOnlySignedBeaconBlock,
|
||||
attestedState state.BeaconState,
|
||||
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
) (*structs.LightClientOptimisticUpdate, error) {
|
||||
result, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, currentSlot, state, block, attestedState, attestedBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return structs.LightClientOptimisticUpdateFromConsensus(result)
|
||||
}
|
||||
@@ -239,7 +239,7 @@ func TestGetPeers(t *testing.T) {
}
}
if !found {
t.Errorf("Expected ID '" + expectedId + "' not found")
t.Error("Expected ID '" + expectedId + "' not found")
}
}
})

@@ -636,6 +636,16 @@ func (s *Server) ProduceSyncCommitteeContribution(w http.ResponseWriter, r *http
ctx, span := trace.StartSpan(r.Context(), "validator.ProduceSyncCommitteeContribution")
defer span.End()

isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
if isOptimistic {
httputil.HandleError(w, "Beacon node is currently syncing and not serving request on that endpoint", http.StatusServiceUnavailable)
return
}

_, index, ok := shared.UintFromQuery(w, r, "subcommittee_index", true)
if !ok {
return

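The new block above rejects the request with 503 while the node is still optimistically synced. If the same guard were needed in several handlers, it could plausibly be factored out along these lines (a hypothetical helper, not existing Prysm code; it only assumes an IsOptimistic-style fetcher like the one used above):

package sketch

import (
	"context"
	"net/http"
)

// optimisticChecker matches the shape of the OptimisticModeFetcher used above.
type optimisticChecker interface {
	IsOptimistic(ctx context.Context) (bool, error)
}

// guardOptimistic writes an error response and returns false when the node is
// syncing optimistically, so a handler can bail out early with one call.
func guardOptimistic(ctx context.Context, w http.ResponseWriter, f optimisticChecker) bool {
	isOptimistic, err := f.IsOptimistic(ctx)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return false
	}
	if isOptimistic {
		http.Error(w, "beacon node is currently syncing", http.StatusServiceUnavailable)
		return false
	}
	return true
}
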
@@ -1584,7 +1584,8 @@ func TestProduceSyncCommitteeContribution(t *testing.T) {
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
},
|
||||
},
|
||||
SyncCommitteePool: syncCommitteePool,
|
||||
SyncCommitteePool: syncCommitteePool,
|
||||
OptimisticModeFetcher: &mockChain.ChainService{},
|
||||
}
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
url := "http://example.com?slot=1&subcommittee_index=1&beacon_block_root=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
@@ -1672,7 +1673,8 @@ func TestProduceSyncCommitteeContribution(t *testing.T) {
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
},
|
||||
},
|
||||
SyncCommitteePool: syncCommitteePool,
|
||||
SyncCommitteePool: syncCommitteePool,
|
||||
OptimisticModeFetcher: &mockChain.ChainService{},
|
||||
}
|
||||
server.ProduceSyncCommitteeContribution(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
@@ -1680,6 +1682,26 @@ func TestProduceSyncCommitteeContribution(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp2))
|
||||
require.ErrorContains(t, "No subcommittee messages found", errors.New(writer.Body.String()))
|
||||
})
|
||||
t.Run("Optimistic returns 503", func(t *testing.T) {
|
||||
url := "http://example.com?slot=1&subcommittee_index=1&beacon_block_root=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
syncCommitteePool = synccommittee.NewStore()
|
||||
server = Server{
|
||||
CoreService: &core.Service{
|
||||
HeadFetcher: &mockChain.ChainService{
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
},
|
||||
},
|
||||
SyncCommitteePool: syncCommitteePool,
|
||||
OptimisticModeFetcher: &mockChain.ChainService{
|
||||
Optimistic: true,
|
||||
},
|
||||
}
|
||||
server.ProduceSyncCommitteeContribution(writer, request)
|
||||
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_RegisterValidator(t *testing.T) {
|
||||
|
||||
@@ -417,7 +417,7 @@ func (vs *Server) PrepareBeaconProposer(
for _, r := range request.Recipients {
recipient := hexutil.Encode(r.FeeRecipient)
if !common.IsHexAddress(recipient) {
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Invalid fee recipient address: %v", recipient))
return nil, status.Errorf(codes.InvalidArgument, "Invalid fee recipient address: %v", recipient)
}
// Use default address if the burn address is return
feeRecipient := primitives.ExecutionAddress(r.FeeRecipient)
@@ -470,7 +470,7 @@ func (vs *Server) GetFeeRecipientByPubKey(ctx context.Context, request *ethpb.Fe
}, nil
} else {
log.WithError(err).Error("An error occurred while retrieving fee recipient from db")
return nil, status.Errorf(codes.Internal, err.Error())
return nil, status.Errorf(codes.Internal, "error=%s", err)
}
}
return &ethpb.FeeRecipientByPubKeyResponse{

@@ -269,7 +269,7 @@ func (vs *Server) optimisticStatus(ctx context.Context) error {
return nil
}

return status.Errorf(codes.Unavailable, errOptimisticMode.Error())
return status.Errorf(codes.Unavailable, "error=%v", errOptimisticMode)
}

// validatorStatus searches for the requested validator's state and deposit to retrieve its inclusion estimate. Also returns the validators index.

@@ -36,7 +36,7 @@ func (vs *Server) GetSyncMessageBlockRoot(
// It also saves the sync committee message into the pending pool for block inclusion.
func (vs *Server) SubmitSyncMessage(ctx context.Context, msg *ethpb.SyncCommitteeMessage) (*emptypb.Empty, error) {
if err := vs.CoreService.SubmitSyncMessage(ctx, msg); err != nil {
return &emptypb.Empty{}, status.Errorf(core.ErrorReasonToGRPC(err.Reason), err.Err.Error())
return &emptypb.Empty{}, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "error=%s", err.Err)
}
return &emptypb.Empty{}, nil
}
@@ -105,7 +105,7 @@ func (vs *Server) SubmitSignedContributionAndProof(
) (*emptypb.Empty, error) {
err := vs.CoreService.SubmitSignedContributionAndProof(ctx, s)
if err != nil {
return &emptypb.Empty{}, status.Errorf(core.ErrorReasonToGRPC(err.Reason), err.Err.Error())
return &emptypb.Empty{}, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "error=%s", err.Err)
}
return &emptypb.Empty{}, nil
}

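The status.Errorf edits above all apply the same fix: use a constant format string and pass the error as an argument rather than using the error text itself as the format, which avoids misparsing any '%' in the message and satisfies the printf-style vet check. A small generic illustration (not Prysm code):

package sketch

import (
	"errors"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func examples() error {
	err := errors.New("rate limited at 100%") // note the '%' in the message

	// Risky form: the message itself is treated as a format string,
	// so the '%' above would be misinterpreted and vet complains.
	_ = status.Errorf(codes.Internal, err.Error())

	// Safer form: constant format, error passed as an argument.
	return status.Errorf(codes.Internal, "error=%s", err)
}
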
@@ -121,9 +121,10 @@ func (s *State) Resume(ctx context.Context, fState state.BeaconState) (state.Bea
return nil, err
}
fRoot := bytesutil.ToBytes32(c.Root)
st := fState
// Resume as genesis state if last finalized root is zero hashes.
if fRoot == params.BeaconConfig().ZeroHash {
st, err := s.beaconDB.GenesisState(ctx)
st, err = s.beaconDB.GenesisState(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not get genesis state")
}
@@ -132,10 +133,13 @@ func (s *State) Resume(ctx context.Context, fState state.BeaconState) (state.Bea
if err != nil {
return nil, stderrors.Join(ErrNoGenesisBlock, err)
}
return st, s.SaveState(ctx, gbr, st)
fRoot = gbr
if err := s.SaveState(ctx, gbr, st); err != nil {
return nil, errors.Wrap(err, "could not save genesis state")
}
}

if fState == nil || fState.IsNil() {
if st == nil || st.IsNil() {
return nil, errors.New("finalized state is nil")
}

@@ -145,20 +149,22 @@ func (s *State) Resume(ctx context.Context, fState state.BeaconState) (state.Bea
}
}()

s.finalizedInfo = &finalizedInfo{slot: fState.Slot(), root: fRoot, state: fState.Copy()}
fEpoch := slots.ToEpoch(fState.Slot())
s.finalizedInfo = &finalizedInfo{slot: st.Slot(), root: fRoot, state: st.Copy()}
populatePubkeyCache(ctx, st)
return st, nil
}

// Pre-populate the pubkey cache with the validator public keys from the finalized state.
// This process takes about 30 seconds on mainnet with 450,000 validators.
func populatePubkeyCache(ctx context.Context, st state.BeaconState) {
epoch := slots.ToEpoch(st.Slot())
go populatePubkeyCacheOnce.Do(func() {
log.Debug("Populating pubkey cache")
start := time.Now()
if err := fState.ReadFromEveryValidator(func(_ int, val state.ReadOnlyValidator) error {
if err := st.ReadFromEveryValidator(func(_ int, val state.ReadOnlyValidator) error {
if ctx.Err() != nil {
return ctx.Err()
}
// Do not cache for non-active validators.
if !helpers.IsActiveValidatorUsingTrie(val, fEpoch) {
if !helpers.IsActiveValidatorUsingTrie(val, epoch) {
return nil
}
pub := val.PublicKey()
@@ -169,8 +175,6 @@ func (s *State) Resume(ctx context.Context, fState state.BeaconState) (state.Bea
}
log.WithField("duration", time.Since(start)).Debug("Done populating pubkey cache")
})

return fState, nil
}

// SaveFinalizedState saves the finalized slot, root and state into memory to be used by state gen service.

@@ -87,6 +87,8 @@ func extractValidDataTypeFromTopic(topic string, digest []byte, clock *startup.C
return extractDataTypeFromTypeMap(types.AttestationMap, digest, clock)
case p2p.AggregateAndProofSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.AggregateAttestationMap, digest, clock)
case p2p.AttesterSlashingSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.AttesterSlashingMap, digest, clock)
}
return nil, nil
}

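The Resume refactor above pushes the pubkey warm-up into populatePubkeyCache, which runs in the background exactly once via a sync.Once so the long walk over validators never blocks resumption. A generic sketch of that warm-once pattern (names are illustrative, not the Prysm implementation):

package sketch

import (
	"context"
	"log"
	"sync"
	"time"
)

var warmOnce sync.Once

// warmCacheOnce kicks off a one-time, asynchronous cache fill. Repeat calls
// are no-ops, and a cancelled context stops the walk early.
func warmCacheOnce(ctx context.Context, keys []string, add func(string)) {
	go warmOnce.Do(func() {
		start := time.Now()
		for _, k := range keys {
			if ctx.Err() != nil {
				return // shut down promptly if the node is stopping
			}
			add(k)
		}
		log.Printf("cache warmed in %s", time.Since(start))
	})
}
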
@@ -137,13 +137,14 @@ func TestExtractDataType(t *testing.T) {
|
||||
chain blockchain.ChainInfoFetcher
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantBlock interfaces.ReadOnlySignedBeaconBlock
|
||||
wantMd metadata.Metadata
|
||||
wantAtt ethpb.Att
|
||||
wantAggregate ethpb.SignedAggregateAttAndProof
|
||||
wantErr bool
|
||||
name string
|
||||
args args
|
||||
wantBlock interfaces.ReadOnlySignedBeaconBlock
|
||||
wantMd metadata.Metadata
|
||||
wantAtt ethpb.Att
|
||||
wantAggregate ethpb.SignedAggregateAttAndProof
|
||||
wantAttSlashing ethpb.AttSlashing
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "no digest",
|
||||
@@ -156,10 +157,11 @@ func TestExtractDataType(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV0(ðpb.MetaDataV0{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
wantMd: wrapper.WrappedMetadataV0(ðpb.MetaDataV0{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantAttSlashing: ðpb.AttesterSlashing{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid digest",
|
||||
@@ -167,11 +169,12 @@ func TestExtractDataType(t *testing.T) {
|
||||
digest: []byte{0x00, 0x01},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: nil,
|
||||
wantMd: nil,
|
||||
wantAtt: nil,
|
||||
wantAggregate: nil,
|
||||
wantErr: true,
|
||||
wantBlock: nil,
|
||||
wantMd: nil,
|
||||
wantAtt: nil,
|
||||
wantAggregate: nil,
|
||||
wantAttSlashing: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non existent digest",
|
||||
@@ -179,11 +182,12 @@ func TestExtractDataType(t *testing.T) {
|
||||
digest: []byte{0x00, 0x01, 0x02, 0x03},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: nil,
|
||||
wantMd: nil,
|
||||
wantAtt: nil,
|
||||
wantAggregate: nil,
|
||||
wantErr: true,
|
||||
wantBlock: nil,
|
||||
wantMd: nil,
|
||||
wantAtt: nil,
|
||||
wantAggregate: nil,
|
||||
wantAttSlashing: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "genesis fork version",
|
||||
@@ -196,9 +200,10 @@ func TestExtractDataType(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantAttSlashing: ðpb.AttesterSlashing{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "altair fork version",
|
||||
@@ -211,10 +216,11 @@ func TestExtractDataType(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantAttSlashing: ðpb.AttesterSlashing{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "bellatrix fork version",
|
||||
@@ -227,10 +233,11 @@ func TestExtractDataType(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantAttSlashing: ðpb.AttesterSlashing{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "capella fork version",
|
||||
@@ -243,10 +250,11 @@ func TestExtractDataType(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantAttSlashing: ðpb.AttesterSlashing{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "deneb fork version",
|
||||
@@ -259,10 +267,11 @@ func TestExtractDataType(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantAttSlashing: ðpb.AttesterSlashing{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "electra fork version",
|
||||
@@ -275,10 +284,11 @@ func TestExtractDataType(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.SingleAttestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProofElectra{},
|
||||
wantErr: false,
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.SingleAttestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProofElectra{},
|
||||
wantAttSlashing: ðpb.AttesterSlashingElectra{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "fulu fork version",
|
||||
@@ -291,10 +301,11 @@ func TestExtractDataType(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.SingleAttestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProofElectra{},
|
||||
wantErr: false,
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.SingleAttestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProofElectra{},
|
||||
wantAttSlashing: ðpb.AttesterSlashingElectra{},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
@@ -323,6 +334,14 @@ func TestExtractDataType(t *testing.T) {
|
||||
if !reflect.DeepEqual(gotAggregate, tt.wantAggregate) {
|
||||
t.Errorf("aggregate: got = %v, want %v", gotAggregate, tt.wantAggregate)
|
||||
}
|
||||
gotAttSlashing, err := extractDataTypeFromTypeMap(types.AttesterSlashingMap, tt.args.digest, tt.args.chain)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("attester slashing: error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotAttSlashing, tt.wantAttSlashing) {
|
||||
t.Errorf("attester slashin: got = %v, want %v", gotAttSlashing, tt.wantAttSlashing)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -188,3 +188,11 @@ func WithAvailableBlocker(avb coverage.AvailableBlocker) Option {
return nil
}
}

// WithSlasherEnabled configures the sync package to support slashing detection.
func WithSlasherEnabled(enabled bool) Option {
return func(s *Service) error {
s.slasherEnabled = enabled
return nil
}
}

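WithSlasherEnabled above follows the functional-options pattern the sync package already uses: each option is a closure applied to the service at construction time. A minimal generic example of the pattern (the Service and field here are stand-ins, not the real sync service):

package sketch

// Service is a stand-in for the configurable type.
type Service struct {
	slasherEnabled bool
}

// Option mutates a Service during construction and may report an error.
type Option func(*Service) error

// WithSlasherEnabled mirrors the shape of the option added above.
func WithSlasherEnabled(enabled bool) Option {
	return func(s *Service) error {
		s.slasherEnabled = enabled
		return nil
	}
}

// NewService applies options in order and fails fast on the first error.
func NewService(opts ...Option) (*Service, error) {
	s := &Service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}
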
@@ -140,8 +140,7 @@ func (s *Service) processUnaggregated(ctx context.Context, att ethpb.Att) {
|
||||
data := att.GetData()
|
||||
|
||||
// This is an important validation before retrieving attestation pre state to defend against
|
||||
// attestation's target intentionally reference checkpoint that's long ago.
|
||||
// Verify current finalized checkpoint is an ancestor of the block defined by the attestation's beacon block root.
|
||||
// attestation's target intentionally referencing a checkpoint that's long ago.
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
|
||||
log.WithError(blockchain.ErrNotDescendantOfFinalized).Debug("Could not verify finalized consistency")
|
||||
return
|
||||
@@ -169,35 +168,57 @@ func (s *Service) processUnaggregated(ctx context.Context, att ethpb.Att) {
|
||||
return
|
||||
}
|
||||
|
||||
var singleAtt *ethpb.SingleAttestation
|
||||
// Decide if the attestation is an Electra SingleAttestation or a Phase0 unaggregated attestation
|
||||
var (
|
||||
attForValidation ethpb.Att
|
||||
broadcastAtt ethpb.Att
|
||||
eventType feed.EventType
|
||||
eventData interface{}
|
||||
)
|
||||
|
||||
if att.Version() >= version.Electra {
|
||||
var ok bool
|
||||
singleAtt, ok = att.(*ethpb.SingleAttestation)
|
||||
singleAtt, ok := att.(*ethpb.SingleAttestation)
|
||||
if !ok {
|
||||
log.Debugf("Attestation has wrong type (expected %T, got %T)", ðpb.SingleAttestation{}, att)
|
||||
return
|
||||
}
|
||||
att = singleAtt.ToAttestationElectra(committee)
|
||||
// Convert Electra SingleAttestation to unaggregated ElectraAttestation. This is needed because many parts of the codebase assume that attestations have a certain structure and SingleAttestation validates these assumptions.
|
||||
attForValidation = singleAtt.ToAttestationElectra(committee)
|
||||
broadcastAtt = singleAtt
|
||||
eventType = operation.SingleAttReceived
|
||||
eventData = &operation.SingleAttReceivedData{
|
||||
Attestation: singleAtt,
|
||||
}
|
||||
} else {
|
||||
// Phase0 attestation
|
||||
attForValidation = att
|
||||
broadcastAtt = att
|
||||
eventType = operation.UnaggregatedAttReceived
|
||||
eventData = &operation.UnAggregatedAttReceivedData{
|
||||
Attestation: att,
|
||||
}
|
||||
}
|
||||
|
||||
valid, err = s.validateUnaggregatedAttWithState(ctx, att, preState)
|
||||
valid, err = s.validateUnaggregatedAttWithState(ctx, attForValidation, preState)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Pending unaggregated attestation failed validation")
|
||||
return
|
||||
}
|
||||
|
||||
if valid == pubsub.ValidationAccept {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = s.cfg.attestationCache.Add(att); err != nil {
|
||||
if err = s.cfg.attestationCache.Add(attForValidation); err != nil {
|
||||
log.WithError(err).Debug("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if err := s.cfg.attPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
if err := s.cfg.attPool.SaveUnaggregatedAttestation(attForValidation); err != nil {
|
||||
log.WithError(err).Debug("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
}
|
||||
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, att.GetAggregationBits())
|
||||
|
||||
s.setSeenCommitteeIndicesSlot(data.Slot, attForValidation.GetCommitteeIndex(), attForValidation.GetAggregationBits())
|
||||
|
||||
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(data.Slot))
|
||||
if err != nil {
|
||||
@@ -205,34 +226,16 @@ func (s *Service) processUnaggregated(ctx context.Context, att ethpb.Att) {
|
||||
return
|
||||
}
|
||||
|
||||
// Broadcasting the signed attestation again once a node is able to process it.
|
||||
var attToBroadcast ethpb.Att
|
||||
if singleAtt != nil {
|
||||
attToBroadcast = singleAtt
|
||||
} else {
|
||||
attToBroadcast = att
|
||||
}
|
||||
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, attToBroadcast), attToBroadcast); err != nil {
|
||||
// Broadcast the final 'broadcastAtt' object
|
||||
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, broadcastAtt), broadcastAtt); err != nil {
|
||||
log.WithError(err).Debug("Could not broadcast")
|
||||
}
|
||||
|
||||
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
|
||||
// of a received unaggregated attestation.
|
||||
if singleAtt != nil {
|
||||
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: operation.SingleAttReceived,
|
||||
Data: &operation.SingleAttReceivedData{
|
||||
Attestation: singleAtt,
|
||||
},
|
||||
})
|
||||
} else {
|
||||
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: operation.UnaggregatedAttReceived,
|
||||
Data: &operation.UnAggregatedAttReceivedData{
|
||||
Attestation: att,
|
||||
},
|
||||
})
|
||||
}
|
||||
// Feed event notification for other services
|
||||
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: eventType,
|
||||
Data: eventData,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -706,3 +706,41 @@ func Test_attsAreEqual_Committee(t *testing.T) {
assert.Equal(t, false, attsAreEqual(att1, att2))
})
}

func Test_SeenCommitteeIndicesSlot(t *testing.T) {
t.Run("phase 0 success", func(t *testing.T) {
s := &Service{
seenUnAggregatedAttestationCache: lruwrpr.New(1),
}
data := &ethpb.AttestationData{Slot: 1, CommitteeIndex: 44}
att := &ethpb.Attestation{
AggregationBits: bitfield.Bitlist{0x01},
Data: data,
}
s.setSeenCommitteeIndicesSlot(data.Slot, att.GetCommitteeIndex(), att.GetAggregationBits())
b := append(bytesutil.Bytes32(uint64(1)), bytesutil.Bytes32(uint64(44))...)
b = append(b, bytesutil.SafeCopyBytes(att.GetAggregationBits())...)
_, ok := s.seenUnAggregatedAttestationCache.Get(string(b))
require.Equal(t, true, ok)
})
t.Run("electra success", func(t *testing.T) {
s := &Service{
seenUnAggregatedAttestationCache: lruwrpr.New(1),
}
// committee index is 0 post electra for attestation electra
data := &ethpb.AttestationData{Slot: 1, CommitteeIndex: 0}
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(uint64(63), true)
att := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0x01},
Data: data,
CommitteeBits: cb,
}
ci := att.GetCommitteeIndex()
s.setSeenCommitteeIndicesSlot(data.Slot, ci, att.GetAggregationBits())
b := append(bytesutil.Bytes32(uint64(1)), bytesutil.Bytes32(uint64(63))...)
b = append(b, bytesutil.SafeCopyBytes(att.GetAggregationBits())...)
_, ok := s.seenUnAggregatedAttestationCache.Get(string(b))
require.Equal(t, true, ok)
})
}

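The Electra test above shows the shape of the seen-attestation cache key: bytesutil.Bytes32 of the slot, then bytesutil.Bytes32 of the committee index, then the raw aggregation bits, with the post-Electra committee index coming from the committee bits (bit 63 here) rather than data.CommitteeIndex. A minimal standalone sketch of assembling such a key follows; bytes32LE is an assumed stand-in for bytesutil.Bytes32, not Prysm's helper.

package main

import (
	"encoding/binary"
	"fmt"
)

// bytes32LE is an illustrative stand-in for a helper like bytesutil.Bytes32:
// the uint64 encoded little-endian and right-padded to 32 bytes.
func bytes32LE(x uint64) []byte {
	b := make([]byte, 32)
	binary.LittleEndian.PutUint64(b, x)
	return b
}

// seenKey builds the cache key asserted by the test above:
// slot || committeeIndex || aggregationBits.
func seenKey(slot, committeeIndex uint64, aggregationBits []byte) string {
	key := append(bytes32LE(slot), bytes32LE(committeeIndex)...)
	key = append(key, aggregationBits...)
	return string(key)
}

func main() {
	// Post-Electra the committee index (63) is derived from the committee bits,
	// while data.CommitteeIndex stays 0.
	fmt.Printf("%x\n", seenKey(1, 63, []byte{0x01}))
}
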
@@ -49,7 +49,12 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
}
available := s.validateRangeAvailability(rp)
if !available {
log.Debug("error in validating range availability")
log.WithFields(logrus.Fields{
"startSlot": rp.start,
"endSlot": rp.end,
"size": rp.size,
"current": s.cfg.clock.CurrentSlot(),
}).Debug("error in validating range availability")
s.writeErrorResponseToStream(responseCodeResourceUnavailable, p2ptypes.ErrResourceUnavailable.Error(), stream)
tracing.AnnotateError(span, err)
return nil

@@ -361,7 +361,7 @@ func readChunkedBlobSidecar(stream network.Stream, encoding encoder.NetworkEncod

v, found := ctxMap[bytesutil.ToBytes4(ctxb)]
if !found {
return b, errors.Wrapf(errBlobUnmarshal, fmt.Sprintf("unrecognized fork digest %#x", ctxb))
return b, errors.Wrapf(errBlobUnmarshal, "unrecognized fork digest %#x", ctxb)
}
// Only deneb and electra are supported at this time, because we lack a fork-spanning interface/union type for blobs.
// In electra, there's no changes to blob type.

@@ -164,6 +164,7 @@ type Service struct {
newBlobVerifier verification.NewBlobVerifier
availableBlocker coverage.AvailableBlocker
ctxMap ContextByteVersions
slasherEnabled bool
}

// NewService initializes new regular sync service.

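The new slasherEnabled field on the sync Service pairs with the replacements of features.Get().EnableSlasher further down in this diff; the apparent intent, which is an inference rather than something stated in the diff, is to read the feature flag once and cache it on the service instead of consulting the registry on every gossip message. A minimal self-contained sketch of that pattern, with an illustrative stand-in for the feature registry:

package main

import "fmt"

// featureFlags is an illustrative stand-in for a global feature registry such
// as Prysm's features.Get(); only the one flag used here is modeled.
type featureFlags struct {
	EnableSlasher bool
}

func getFeatures() *featureFlags { return &featureFlags{EnableSlasher: true} }

// service caches the flag at construction rather than re-reading it on every
// validation call, which is the shape the diff above introduces.
type service struct {
	slasherEnabled bool
}

func newService() *service {
	return &service{slasherEnabled: getFeatures().EnableSlasher}
}

func main() {
	s := newService()
	fmt.Println("slasher enabled:", s.slasherEnabled)
}
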
@@ -17,7 +17,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
@@ -34,7 +33,11 @@ import (
// - The attestation is unaggregated -- that is, it has exactly one participating validator (len(get_attesting_indices(state, attestation.data, attestation.aggregation_bits)) == 1).
// - attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot).
// - The signature of attestation is valid.
func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
func (s *Service) validateCommitteeIndexBeaconAttestation(
ctx context.Context,
pid peer.ID,
msg *pubsub.Message,
) (pubsub.ValidationResult, error) {
if pid == s.cfg.p2p.PeerID() {
return pubsub.ValidationAccept, nil
}
@@ -64,6 +67,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
if err := helpers.ValidateNilAttestation(att); err != nil {
return pubsub.ValidationReject, err
}

data := att.GetData()

// Do not process slot 0 attestations.
@@ -73,8 +77,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p

// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
// processing tolerance.
if err := helpers.ValidateAttestationTime(data.Slot, s.cfg.clock.GenesisTime(),
earlyAttestationProcessingTolerance); err != nil {
if err := helpers.ValidateAttestationTime(data.Slot, s.cfg.clock.GenesisTime(), earlyAttestationProcessingTolerance); err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
@@ -84,12 +87,11 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p

committeeIndex := att.GetCommitteeIndex()

if !features.Get().EnableSlasher {
if !s.slasherEnabled {
// Verify this the first attestation received for the participating validator for the slot.
if s.hasSeenCommitteeIndicesSlot(data.Slot, committeeIndex, att.GetAggregationBits()) {
return pubsub.ValidationIgnore, nil
}

// Reject an attestation if it references an invalid block.
if s.hasBadBlock(bytesutil.ToBytes32(data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(data.Target.Root)) ||
@@ -99,15 +101,12 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
}
}

var validationRes pubsub.ValidationResult

// Verify the block being voted and the processed state is in beaconDB and the block has passed validation if it's in the beaconDB.
blockRoot := bytesutil.ToBytes32(data.BeaconBlockRoot)
if !s.hasBlockAndState(ctx, blockRoot) {
return s.saveToPendingAttPool(att)
}

if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
if !s.cfg.chain.InForkchoice(blockRoot) {
tracing.AnnotateError(span, blockchain.ErrNotDescendantOfFinalized)
return pubsub.ValidationIgnore, blockchain.ErrNotDescendantOfFinalized
}
@@ -123,12 +122,12 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
return pubsub.ValidationIgnore, err
}

validationRes, err = s.validateUnaggregatedAttTopic(ctx, att, preState, *msg.Topic)
validationRes, err := s.validateUnaggregatedAttTopic(ctx, att, preState, *msg.Topic)
if validationRes != pubsub.ValidationAccept {
return validationRes, err
}

committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.GetData().Slot, committeeIndex)
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, data.Slot, committeeIndex)
if err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
@@ -139,21 +138,42 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
return validationRes, err
}

var singleAtt *eth.SingleAttestation
// Consolidated handling of Electra SingleAttestation vs Phase0 unaggregated attestation
var (
attForValidation eth.Att // what we'll pass to further validation
eventType feed.EventType
eventData interface{}
)

if att.Version() >= version.Electra {
singleAtt, ok = att.(*eth.SingleAttestation)
singleAtt, ok := att.(*eth.SingleAttestation)
if !ok {
return pubsub.ValidationIgnore, fmt.Errorf("attestation has wrong type (expected %T, got %T)", &eth.SingleAttestation{}, att)
return pubsub.ValidationIgnore, fmt.Errorf(
"attestation has wrong type (expected %T, got %T)",
&eth.SingleAttestation{}, att,
)
}
// Convert Electra SingleAttestation to unaggregated ElectraAttestation. This is needed because many parts of the codebase assume that attestations have a certain structure and SingleAttestation violates these assumptions.
attForValidation = singleAtt.ToAttestationElectra(committee)
eventType = operation.SingleAttReceived
eventData = &operation.SingleAttReceivedData{
Attestation: singleAtt,
}
} else {
// Phase0 unaggregated attestation
attForValidation = att
eventType = operation.UnaggregatedAttReceived
eventData = &operation.UnAggregatedAttReceivedData{
Attestation: att,
}
att = singleAtt.ToAttestationElectra(committee)
}

validationRes, err = s.validateUnaggregatedAttWithState(ctx, att, preState)
validationRes, err = s.validateUnaggregatedAttWithState(ctx, attForValidation, preState)
if validationRes != pubsub.ValidationAccept {
return validationRes, err
}

if features.Get().EnableSlasher {
if s.slasherEnabled {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
go func() {
@@ -172,7 +192,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
tracing.AnnotateError(span, err)
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
indexedAtt, err := attestation.ConvertToIndexed(ctx, attForValidation, committee)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
tracing.AnnotateError(span, err)
@@ -182,27 +202,16 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
}()
}

// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
// of a received unaggregated attestation.
if singleAtt != nil {
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.SingleAttReceived,
Data: &operation.SingleAttReceivedData{
Attestation: singleAtt,
},
})
} else {
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{
Attestation: att,
},
})
}
// Notify other services in the beacon node
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: eventType,
Data: eventData,
})

s.setSeenCommitteeIndicesSlot(data.Slot, committeeIndex, att.GetAggregationBits())
s.setSeenCommitteeIndicesSlot(data.Slot, committeeIndex, attForValidation.GetAggregationBits())

msg.ValidatorData = att
// Attach final validated attestation to the message for further pipeline use
msg.ValidatorData = attForValidation

return pubsub.ValidationAccept, nil
}

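The conversion above, singleAtt.ToAttestationElectra(committee), takes an Electra SingleAttestation, which names one attester directly, and produces an unaggregated attestation whose aggregation bitlist presumably has exactly one bit set at that attester's position in the committee. A minimal sketch of that idea follows, under the assumption that the committee slice is ordered by committee position; the helper name is illustrative, not Prysm's.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

// singleAttesterBits returns a bitlist sized to the committee with exactly one
// bit set at the position of attesterIndex, or an error if the attester is not
// a member of the committee.
func singleAttesterBits(committee []uint64, attesterIndex uint64) (bitfield.Bitlist, error) {
	bits := bitfield.NewBitlist(uint64(len(committee)))
	for pos, idx := range committee {
		if idx == attesterIndex {
			bits.SetBitAt(uint64(pos), true)
			return bits, nil
		}
	}
	return nil, fmt.Errorf("attester %d not found in committee", attesterIndex)
}

func main() {
	committee := []uint64{10, 42, 7, 99}
	bits, err := singleAttesterBits(committee, 7)
	if err != nil {
		panic(err)
	}
	fmt.Printf("aggregation bits: %08b (set=%d)\n", bits.Bytes(), bits.Count())
}
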
@@ -15,7 +15,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -80,7 +79,7 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
},
})

if features.Get().EnableSlasher {
if s.slasherEnabled {
// Feed the block header to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
go func() {

@@ -92,11 +92,11 @@ func (s *Service) validateBlob(ctx context.Context, pid peer.ID, msg *pubsub.Mes
return pubsub.ValidationIgnore, err
}

if err := vf.ValidProposerSignature(ctx); err != nil {
if err := vf.SidecarParentValid(s.hasBadBlock); err != nil {
return pubsub.ValidationReject, err
}

if err := vf.SidecarParentValid(s.hasBadBlock); err != nil {
if err := vf.ValidProposerSignature(ctx); err != nil {
return pubsub.ValidationReject, err
}

changelog/bastin_lightclient-finality-optimistic-ssz.md (Normal file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Add SSZ support to light client finality and optimistic APIs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14836)
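For the SSZ support noted above, a client can typically opt into the SSZ wire format on beacon-API routes by sending an Accept: application/octet-stream header. A minimal sketch against the standard light client finality route follows; the host, port 3500, and the exact route are assumptions (the standard beacon-API path), not something this diff states.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumed local beacon node REST endpoint; adjust host and port as needed.
	req, err := http.NewRequest(http.MethodGet,
		"http://127.0.0.1:3500/eth/v1/beacon/light_client/finality_update", nil)
	if err != nil {
		panic(err)
	}
	// Request SSZ instead of JSON.
	req.Header.Set("Accept", "application/octet-stream")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("status=%d content-type=%s bytes=%d\n",
		resp.StatusCode, resp.Header.Get("Content-Type"), len(body))
}
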
changelog/dB2510_fixpruner.md (Normal file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Fixed the pruner so it no longer blocks while pruning a large database, by introducing a `batchSize` limit.
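The pruner fix above describes deleting in bounded batches (`batchSize`) so one pruning pass does not hold the database for too long. A minimal sketch of that shape follows; the deleteBatch callback is an assumed stand-in for the real database call, not Prysm's pruner API.

package main

import (
	"context"
	"fmt"
)

// pruneInBatches deletes up to batchSize items per iteration until deleteBatch
// reports there is nothing left, checking the context between batches so a
// large prune can be interrupted instead of blocking indefinitely.
func pruneInBatches(ctx context.Context, batchSize int, deleteBatch func(limit int) (int, error)) (int, error) {
	total := 0
	for {
		if err := ctx.Err(); err != nil {
			return total, err
		}
		n, err := deleteBatch(batchSize)
		if err != nil {
			return total, err
		}
		total += n
		if n < batchSize { // nothing (or not enough) left to delete
			return total, nil
		}
	}
}

func main() {
	remaining := 2500
	total, err := pruneInBatches(context.Background(), 1000, func(limit int) (int, error) {
		n := limit
		if remaining < limit {
			n = remaining
		}
		remaining -= n
		return n, nil
	})
	fmt.Println(total, err) // 2500 <nil>
}
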
changelog/james-prysm_add-op-check-sync-committee.md (Normal file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- /eth/v1/validator/sync_committee_contribution now checks for optimistic status and returns a 503 if the node is optimistic.
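The fix above returns HTTP 503 while the node is still optimistic, i.e. its head has not yet been fully verified by the execution layer. A minimal net/http sketch of such a guard follows; the wrapper and predicate names are illustrative, not Prysm's handler code.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// withOptimisticGuard wraps a handler and answers 503 Service Unavailable
// whenever the node reports it is optimistic, mirroring the behavior described
// for /eth/v1/validator/sync_committee_contribution.
func withOptimisticGuard(isOptimistic func() bool, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if isOptimistic() {
			http.Error(w, "beacon node is currently optimistic and cannot serve this request", http.StatusServiceUnavailable)
			return
		}
		next(w, r)
	}
}

func main() {
	h := withOptimisticGuard(
		func() bool { return true },
		func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, "ok") },
	)
	rec := httptest.NewRecorder()
	h(rec, httptest.NewRequest(http.MethodGet, "/eth/v1/validator/sync_committee_contribution", nil))
	fmt.Println(rec.Code) // 503
}
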
changelog/james-prysm_attestation-cleanup-suggestions.md (Normal file, 4 lines)
@@ -0,0 +1,4 @@
### Ignored

- Clean up single attestation code for readability.

changelog/james-prysm_cleanup-payload-conversions.md (Normal file, 7 lines)
@@ -0,0 +1,7 @@
### Ignored

- Code cleanup on payload types and payload conversions.

### Fixed

- Fixed a gocognit lint violation in block conversions between JSON and proto types.
changelog/james-prysm_committee-index-log.md (Normal file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Add a log of the committee index when committee bits are not the expected length of 1.
changelog/james-prysm_fix-blobl-subnet-config.md (Normal file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Fixed /eth/v1/config/spec so it displays BLOB_SIDECAR_SUBNET_COUNT and BLOB_SIDECAR_SUBNET_COUNT_ELECTRA.
changelog/james-prysm_fix-builder-e2e.md (Normal file, 4 lines)
@@ -0,0 +1,4 @@
### Fixed

- Fixes a superfluous response.WriteHeader call being printed from an error in the builder.
- Fixes e2e runs where the builder had the wrong gas limit header because it was not set on the eth1 nodes.
changelog/james-prysm_fix-electra-committee-log.md (Normal file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Cosmetic fix so post-Electra validator logs display attestation committee information correctly.
changelog/james-prysm_fix-wrong-committee-seen.md (Normal file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Fix inserting the wrong committee index into the seen cache for Electra attestations.
changelog/james-prysm_update-default-gas-limit.md (Normal file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Updated default gas limit from 30M to 36M.
changelog/kasey_delete-block-idx.md (Normal file, 2 lines)
@@ -0,0 +1,2 @@
### Fixed
- Ensure that deleting a block from the database clears its entry in the slot->root db index.
changelog/kasey_event-slot-fix.md (Normal file, 2 lines)
@@ -0,0 +1,2 @@
### Fixed
- Fixed a bug in the event stream handler where, when processing payload attribute events, the timestamp and slot of the event were based on the head rather than the current slot.
changelog/manu_validators_registration_default_batch.md (Normal file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- `--validators-registration-batch-size`: Change default value from `0` to `200`.
changelog/nisdas_fix_attester_slashing_validation.md (Normal file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Check for the correct attester slashing type during gossip validation.
Some files were not shown because too many files have changed in this diff.