Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-11 06:18:05 -05:00

Compare commits: dev-payloa...use_fieldp (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 99b2011183 |  |
.bazelrc | 6

@@ -230,3 +230,9 @@ build --modify_execution_info='GoStdlib.*=+no-remote-cache'

# Set bazel gotag
build --define gotags=bazel

# Build the binary with Beacon API calls for the validator
build --flag_alias=use_beacon_api=//validator/client/validator-client-factory:use_beacon_api

build:beacon_api --use_beacon_api
build:beacon_api --define=gotags=use_beacon_api
.gitignore (vendored) | 3

@@ -38,6 +38,3 @@ metaData

# execution API authentication
jwt.hex

# manual testing
tmp
WORKSPACE | 45

@@ -88,10 +88,10 @@ http_archive(
        # Expose internals of go_test for custom build transitions.
        "//third_party:io_bazel_rules_go_test.patch",
    ],
    sha256 = "ae013bf35bd23234d1dea46b079f1e05ba74ac0321423830119d3e787ec73483",
    sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa",
    urls = [
        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.36.0/rules_go-v0.36.0.zip",
        "https://github.com/bazelbuild/rules_go/releases/download/v0.36.0/rules_go-v0.36.0.zip",
        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
        "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
    ],
)

@@ -110,6 +110,13 @@ git_repository(
    # gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
)

http_archive(
    name = "fuzzit_linux",
    build_file_content = "exports_files([\"fuzzit\"])",
    sha256 = "9ca76ac1c22d9360936006efddf992977ebf8e4788ded8e5f9d511285c9ac774",
    urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"],
)

load(
    "@io_bazel_rules_docker//repositories:repositories.bzl",
    container_repositories = "repositories",

@@ -157,15 +164,35 @@ container_pull(
    repository = "pinglamb/alpine-glibc",
)

container_pull(
    name = "fuzzit_base",
    digest = "sha256:24a39a4360b07b8f0121eb55674a2e757ab09f0baff5569332fefd227ee4338f",
    registry = "gcr.io",
    repository = "fuzzit-public/stretch-llvm8",
)

load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")

go_rules_dependencies()

go_register_toolchains(
    go_version = "1.19.4",
    go_version = "1.19.3",
    nogo = "@//:nogo",
)

http_archive(
    name = "prysm_testnet_site",
    build_file_content = """
proto_library(
    name = "faucet_proto",
    srcs = ["src/proto/faucet.proto"],
    visibility = ["//visibility:public"],
)""",
    sha256 = "29742136ff9faf47343073c4569a7cf21b8ed138f726929e09e3c38ab83544f7",
    strip_prefix = "prysm-testnet-site-5c711600f0a77fc553b18cf37b880eaffef4afdb",
    url = "https://github.com/prestonvanloon/prysm-testnet-site/archive/5c711600f0a77fc553b18cf37b880eaffef4afdb.tar.gz",
)

http_archive(
    name = "io_kubernetes_build",
    sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",

@@ -188,7 +215,7 @@ filegroup(
    url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)

consensus_spec_version = "v1.3.0-alpha.1"
consensus_spec_version = "v1.2.0"

bls_test_version = "v0.1.1"

@@ -204,7 +231,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "b5a65eb5ecef1c4fca82ff29739936fee019e8a529ef392ea5e46aa39f40a0b2",
    sha256 = "eded065f923a99b78372d6f748c9b3f1de8229f8f574c1fec9c5fe76c8affb65",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -220,7 +247,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "b381bb0184e69cb17d05fbbe75f48c6aec7726957d073e3a65c26671d5d27d37",
    sha256 = "2ed83783129e93360f4bf9d5d5f606ee28adbe8b458acdfac61b8d99218d16a9",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -236,7 +263,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "9466f2a5a2dea039a2deb953f0b5dce5399400028bf3f218ffef03f8ef9c446c",
    sha256 = "f5eff2adac78c99a4180491f373328465263caa2cba0206308a7c598abf76cda",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -251,7 +278,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "3cc3141651a320a1f5767d15826e85aaa96eb4459d9e1a1d3f5a0cdbc79b8f56",
    sha256 = "f1a33b7459391716defa4c2b6f0c1bd7ccc38471ce9126d752d3bad767bebf2b",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
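Note: every http_archive above pairs a url with a pinned sha256, so bumping a dependency (rules_go, the consensus-spec-tests tarballs, the consensus-specs archive) means updating both lines together. A minimal Go sketch of the check Bazel performs on the fetched bytes; the data and digest here are made up for illustration.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// verify fails unless the downloaded bytes hash to the pinned digest,
// which is the guarantee the sha256 attribute of http_archive provides.
func verify(data []byte, pinnedHex string) error {
	sum := sha256.Sum256(data)
	if hex.EncodeToString(sum[:]) != pinnedHex {
		return fmt.Errorf("sha256 mismatch: got %s, want %s", hex.EncodeToString(sum[:]), pinnedHex)
	}
	return nil
}

func main() {
	archive := []byte("pretend these are the tarball bytes")
	pinned := sha256.Sum256(archive)
	fmt.Println(verify(archive, hex.EncodeToString(pinned[:])))                  // <nil>
	fmt.Println(verify([]byte("tampered bytes"), hex.EncodeToString(pinned[:]))) // sha256 mismatch
}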
@@ -81,7 +81,7 @@ func TestClient_Status(t *testing.T) {

func TestClient_RegisterValidator(t *testing.T) {
    ctx := context.Background()
    expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`
    expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}}]`
    expectedPath := "/eth/v1/builder/validators"
    hc := &http.Client{
        Transport: roundtrip(func(r *http.Request) (*http.Response, error) {

@@ -111,7 +111,6 @@ func TestClient_RegisterValidator(t *testing.T) {
            Timestamp: 42,
            Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
        },
        Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
    }
    require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
}
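Note: the only difference between the two expectedBody literals above is the missing "signature" member. On the side of the diff where the struct tags carry ,omitempty (next hunks), a registration built without a Signature has that field dropped from the encoded JSON. A minimal sketch with stand-in types, not Prysm's actual structs:

package main

import (
	"encoding/json"
	"fmt"
)

// alwaysEmit mirrors tags without omitempty; omitWhenEmpty mirrors the tagged variant.
type alwaysEmit struct {
	Pubkey    string `json:"pubkey"`
	Signature string `json:"signature"`
}

type omitWhenEmpty struct {
	Pubkey    string `json:"pubkey"`
	Signature string `json:"signature,omitempty"`
}

func main() {
	a, _ := json.Marshal(alwaysEmit{Pubkey: "0x93"})
	b, _ := json.Marshal(omitWhenEmpty{Pubkey: "0x93"})
	fmt.Println(string(a)) // {"pubkey":"0x93","signature":""}
	fmt.Println(string(b)) // {"pubkey":"0x93"}
}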
@@ -23,8 +23,8 @@ type ValidatorRegistration struct {

func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        Message *ValidatorRegistration `json:"message"`
        Signature hexutil.Bytes `json:"signature"`
        Message *ValidatorRegistration `json:"message,omitempty"`
        Signature hexutil.Bytes `json:"signature,omitempty"`
    }{
        Message: &ValidatorRegistration{r.Message},
        Signature: r.SignedValidatorRegistrationV1.Signature,

@@ -36,8 +36,8 @@ func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
        r.SignedValidatorRegistrationV1 = &eth.SignedValidatorRegistrationV1{}
    }
    o := struct {
        Message *ValidatorRegistration `json:"message"`
        Signature hexutil.Bytes `json:"signature"`
        Message *ValidatorRegistration `json:"message,omitempty"`
        Signature hexutil.Bytes `json:"signature,omitempty"`
    }{}
    if err := json.Unmarshal(b, &o); err != nil {
        return err

@@ -49,10 +49,10 @@ func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {

func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        FeeRecipient hexutil.Bytes `json:"fee_recipient"`
        GasLimit string `json:"gas_limit"`
        Timestamp string `json:"timestamp"`
        Pubkey hexutil.Bytes `json:"pubkey"`
        FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
        GasLimit string `json:"gas_limit,omitempty"`
        Timestamp string `json:"timestamp,omitempty"`
        Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
    }{
        FeeRecipient: r.FeeRecipient,
        GasLimit: fmt.Sprintf("%d", r.GasLimit),

@@ -66,10 +66,10 @@ func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
        r.ValidatorRegistrationV1 = &eth.ValidatorRegistrationV1{}
    }
    o := struct {
        FeeRecipient hexutil.Bytes `json:"fee_recipient"`
        GasLimit string `json:"gas_limit"`
        Timestamp string `json:"timestamp"`
        Pubkey hexutil.Bytes `json:"pubkey"`
        FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
        GasLimit string `json:"gas_limit,omitempty"`
        Timestamp string `json:"timestamp,omitempty"`
        Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
    }{}
    if err := json.Unmarshal(b, &o); err != nil {
        return err

@@ -183,11 +183,11 @@ func (s Uint64String) MarshalText() ([]byte, error) {
}

type ExecHeaderResponse struct {
    Version string `json:"version"`
    Version string `json:"version,omitempty"`
    Data struct {
        Signature hexutil.Bytes `json:"signature"`
        Message *BuilderBid `json:"message"`
    } `json:"data"`
        Signature hexutil.Bytes `json:"signature,omitempty"`
        Message *BuilderBid `json:"message,omitempty"`
    } `json:"data,omitempty"`
}

func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {

@@ -233,26 +233,26 @@ func (h *ExecutionPayloadHeader) ToProto() (*v1.ExecutionPayloadHeader, error) {
}

type BuilderBid struct {
    Header *ExecutionPayloadHeader `json:"header"`
    Value Uint256 `json:"value"`
    Pubkey hexutil.Bytes `json:"pubkey"`
    Header *ExecutionPayloadHeader `json:"header,omitempty"`
    Value Uint256 `json:"value,omitempty"`
    Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
}

type ExecutionPayloadHeader struct {
    ParentHash hexutil.Bytes `json:"parent_hash"`
    FeeRecipient hexutil.Bytes `json:"fee_recipient"`
    StateRoot hexutil.Bytes `json:"state_root"`
    ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
    LogsBloom hexutil.Bytes `json:"logs_bloom"`
    PrevRandao hexutil.Bytes `json:"prev_randao"`
    BlockNumber Uint64String `json:"block_number"`
    GasLimit Uint64String `json:"gas_limit"`
    GasUsed Uint64String `json:"gas_used"`
    Timestamp Uint64String `json:"timestamp"`
    ExtraData hexutil.Bytes `json:"extra_data"`
    BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
    BlockHash hexutil.Bytes `json:"block_hash"`
    TransactionsRoot hexutil.Bytes `json:"transactions_root"`
    ParentHash hexutil.Bytes `json:"parent_hash,omitempty"`
    FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
    StateRoot hexutil.Bytes `json:"state_root,omitempty"`
    ReceiptsRoot hexutil.Bytes `json:"receipts_root,omitempty"`
    LogsBloom hexutil.Bytes `json:"logs_bloom,omitempty"`
    PrevRandao hexutil.Bytes `json:"prev_randao,omitempty"`
    BlockNumber Uint64String `json:"block_number,omitempty"`
    GasLimit Uint64String `json:"gas_limit,omitempty"`
    GasUsed Uint64String `json:"gas_used,omitempty"`
    Timestamp Uint64String `json:"timestamp,omitempty"`
    ExtraData hexutil.Bytes `json:"extra_data,omitempty"`
    BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
    BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
    TransactionsRoot hexutil.Bytes `json:"transactions_root,omitempty"`
    *v1.ExecutionPayloadHeader
}

@@ -294,25 +294,25 @@ func (h *ExecutionPayloadHeader) UnmarshalJSON(b []byte) error {
}

type ExecPayloadResponse struct {
    Version string `json:"version"`
    Data ExecutionPayload `json:"data"`
    Version string `json:"version,omitempty"`
    Data ExecutionPayload `json:"data,omitempty"`
}

type ExecutionPayload struct {
    ParentHash hexutil.Bytes `json:"parent_hash"`
    FeeRecipient hexutil.Bytes `json:"fee_recipient"`
    StateRoot hexutil.Bytes `json:"state_root"`
    ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
    LogsBloom hexutil.Bytes `json:"logs_bloom"`
    PrevRandao hexutil.Bytes `json:"prev_randao"`
    BlockNumber Uint64String `json:"block_number"`
    GasLimit Uint64String `json:"gas_limit"`
    GasUsed Uint64String `json:"gas_used"`
    Timestamp Uint64String `json:"timestamp"`
    ExtraData hexutil.Bytes `json:"extra_data"`
    BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
    BlockHash hexutil.Bytes `json:"block_hash"`
    Transactions []hexutil.Bytes `json:"transactions"`
    ParentHash hexutil.Bytes `json:"parent_hash,omitempty"`
    FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
    StateRoot hexutil.Bytes `json:"state_root,omitempty"`
    ReceiptsRoot hexutil.Bytes `json:"receipts_root,omitempty"`
    LogsBloom hexutil.Bytes `json:"logs_bloom,omitempty"`
    PrevRandao hexutil.Bytes `json:"prev_randao,omitempty"`
    BlockNumber Uint64String `json:"block_number,omitempty"`
    GasLimit Uint64String `json:"gas_limit,omitempty"`
    GasUsed Uint64String `json:"gas_used,omitempty"`
    Timestamp Uint64String `json:"timestamp,omitempty"`
    ExtraData hexutil.Bytes `json:"extra_data,omitempty"`
    BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
    BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
    Transactions []hexutil.Bytes `json:"transactions,omitempty"`
}

func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {

@@ -356,8 +356,8 @@ type BlindedBeaconBlockBodyBellatrix struct {

func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        Message *BlindedBeaconBlockBellatrix `json:"message"`
        Signature hexutil.Bytes `json:"signature"`
        Message *BlindedBeaconBlockBellatrix `json:"message,omitempty"`
        Signature hexutil.Bytes `json:"signature,omitempty"`
    }{
        Message: &BlindedBeaconBlockBellatrix{r.SignedBlindedBeaconBlockBellatrix.Block},
        Signature: r.SignedBlindedBeaconBlockBellatrix.Signature,

@@ -367,10 +367,10 @@ func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        Slot string `json:"slot"`
        ProposerIndex string `json:"proposer_index"`
        ParentRoot hexutil.Bytes `json:"parent_root"`
        StateRoot hexutil.Bytes `json:"state_root"`
        Body *BlindedBeaconBlockBodyBellatrix `json:"body"`
        ProposerIndex string `json:"proposer_index,omitempty"`
        ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
        StateRoot hexutil.Bytes `json:"state_root,omitempty"`
        Body *BlindedBeaconBlockBodyBellatrix `json:"body,omitempty"`
    }{
        Slot: fmt.Sprintf("%d", b.Slot),
        ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),

@@ -386,8 +386,8 @@ type ProposerSlashing struct {

func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1"`
        SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2"`
        SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1,omitempty"`
        SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2,omitempty"`
    }{
        SignedHeader1: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_1},
        SignedHeader2: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_2},

@@ -400,8 +400,8 @@ type SignedBeaconBlockHeader struct {

func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        Header *BeaconBlockHeader `json:"message"`
        Signature hexutil.Bytes `json:"signature"`
        Header *BeaconBlockHeader `json:"message,omitempty"`
        Signature hexutil.Bytes `json:"signature,omitempty"`
    }{
        Header: &BeaconBlockHeader{h.SignedBeaconBlockHeader.Header},
        Signature: h.SignedBeaconBlockHeader.Signature,

@@ -414,11 +414,11 @@ type BeaconBlockHeader struct {

func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        Slot string `json:"slot"`
        ProposerIndex string `json:"proposer_index"`
        ParentRoot hexutil.Bytes `json:"parent_root"`
        StateRoot hexutil.Bytes `json:"state_root"`
        BodyRoot hexutil.Bytes `json:"body_root"`
        Slot string `json:"slot,omitempty"`
        ProposerIndex string `json:"proposer_index,omitempty"`
        ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
        StateRoot hexutil.Bytes `json:"state_root,omitempty"`
        BodyRoot hexutil.Bytes `json:"body_root,omitempty"`
    }{
        Slot: fmt.Sprintf("%d", h.BeaconBlockHeader.Slot),
        ProposerIndex: fmt.Sprintf("%d", h.BeaconBlockHeader.ProposerIndex),

@@ -438,9 +438,9 @@ func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
        indices[i] = fmt.Sprintf("%d", a.AttestingIndices[i])
    }
    return json.Marshal(struct {
        AttestingIndices []string `json:"attesting_indices"`
        Data *AttestationData `json:"data"`
        Signature hexutil.Bytes `json:"signature"`
        AttestingIndices []string `json:"attesting_indices,omitempty"`
        Data *AttestationData `json:"data,omitempty"`
        Signature hexutil.Bytes `json:"signature,omitempty"`
    }{
        AttestingIndices: indices,
        Data: &AttestationData{a.IndexedAttestation.Data},

@@ -454,8 +454,8 @@ type AttesterSlashing struct {

func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        Attestation1 *IndexedAttestation `json:"attestation_1"`
        Attestation2 *IndexedAttestation `json:"attestation_2"`
        Attestation1 *IndexedAttestation `json:"attestation_1,omitempty"`
        Attestation2 *IndexedAttestation `json:"attestation_2,omitempty"`
    }{
        Attestation1: &IndexedAttestation{s.Attestation_1},
        Attestation2: &IndexedAttestation{s.Attestation_2},

@@ -468,8 +468,8 @@ type Checkpoint struct {

func (c *Checkpoint) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        Epoch string `json:"epoch"`
        Root hexutil.Bytes `json:"root"`
        Epoch string `json:"epoch,omitempty"`
        Root hexutil.Bytes `json:"root,omitempty"`
    }{
        Epoch: fmt.Sprintf("%d", c.Checkpoint.Epoch),
        Root: c.Checkpoint.Root,

@@ -482,11 +482,11 @@ type AttestationData struct {

func (a *AttestationData) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        Slot string `json:"slot"`
        Index string `json:"index"`
        BeaconBlockRoot hexutil.Bytes `json:"beacon_block_root"`
        Source *Checkpoint `json:"source"`
        Target *Checkpoint `json:"target"`
        Slot string `json:"slot,omitempty"`
        Index string `json:"index,omitempty"`
        BeaconBlockRoot hexutil.Bytes `json:"beacon_block_root,omitempty"`
        Source *Checkpoint `json:"source,omitempty"`
        Target *Checkpoint `json:"target,omitempty"`
    }{
        Slot: fmt.Sprintf("%d", a.AttestationData.Slot),
        Index: fmt.Sprintf("%d", a.AttestationData.CommitteeIndex),

@@ -502,9 +502,9 @@ type Attestation struct {

func (a *Attestation) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        AggregationBits hexutil.Bytes `json:"aggregation_bits"`
        Data *AttestationData `json:"data"`
        Signature hexutil.Bytes `json:"signature" ssz-size:"96"`
        AggregationBits hexutil.Bytes `json:"aggregation_bits,omitempty"`
        Data *AttestationData `json:"data,omitempty"`
        Signature hexutil.Bytes `json:"signature,omitempty" ssz-size:"96"`
    }{
        AggregationBits: hexutil.Bytes(a.Attestation.AggregationBits),
        Data: &AttestationData{a.Attestation.Data},

@@ -518,10 +518,10 @@ type DepositData struct {

func (d *DepositData) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        PublicKey hexutil.Bytes `json:"pubkey"`
        WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"`
        Amount string `json:"amount"`
        Signature hexutil.Bytes `json:"signature"`
        PublicKey hexutil.Bytes `json:"pubkey,omitempty"`
        WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials,omitempty"`
        Amount string `json:"amount,omitempty"`
        Signature hexutil.Bytes `json:"signature,omitempty"`
    }{
        PublicKey: d.PublicKey,
        WithdrawalCredentials: d.WithdrawalCredentials,

@@ -554,8 +554,8 @@ type SignedVoluntaryExit struct {

func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        Message *VoluntaryExit `json:"message"`
        Signature hexutil.Bytes `json:"signature"`
        Message *VoluntaryExit `json:"message,omitempty"`
        Signature hexutil.Bytes `json:"signature,omitempty"`
    }{
        Signature: sve.SignedVoluntaryExit.Signature,
        Message: &VoluntaryExit{sve.SignedVoluntaryExit.Exit},

@@ -568,8 +568,8 @@ type VoluntaryExit struct {

func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        Epoch string `json:"epoch"`
        ValidatorIndex string `json:"validator_index"`
        Epoch string `json:"epoch,omitempty"`
        ValidatorIndex string `json:"validator_index,omitempty"`
    }{
        Epoch: fmt.Sprintf("%d", ve.Epoch),
        ValidatorIndex: fmt.Sprintf("%d", ve.ValidatorIndex),

@@ -582,8 +582,8 @@ type SyncAggregate struct {

func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits"`
        SyncCommitteeSignature hexutil.Bytes `json:"sync_committee_signature"`
        SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits,omitempty"`
        SyncCommitteeSignature hexutil.Bytes `json:"sync_committee_signature,omitempty"`
    }{
        SyncCommitteeBits: hexutil.Bytes(s.SyncAggregate.SyncCommitteeBits),
        SyncCommitteeSignature: s.SyncAggregate.SyncCommitteeSignature,

@@ -596,9 +596,9 @@ type Eth1Data struct {

func (e *Eth1Data) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        DepositRoot hexutil.Bytes `json:"deposit_root"`
        DepositCount string `json:"deposit_count"`
        BlockHash hexutil.Bytes `json:"block_hash"`
        DepositRoot hexutil.Bytes `json:"deposit_root,omitempty"`
        DepositCount string `json:"deposit_count,omitempty"`
        BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
    }{
        DepositRoot: e.DepositRoot,
        DepositCount: fmt.Sprintf("%d", e.DepositCount),

@@ -628,16 +628,16 @@ func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
        pros[i] = &ProposerSlashing{ProposerSlashing: b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings[i]}
    }
    return json.Marshal(struct {
        RandaoReveal hexutil.Bytes `json:"randao_reveal"`
        Eth1Data *Eth1Data `json:"eth1_data"`
        Graffiti hexutil.Bytes `json:"graffiti"`
        ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
        AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
        Attestations []*Attestation `json:"attestations"`
        Deposits []*Deposit `json:"deposits"`
        VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
        SyncAggregate *SyncAggregate `json:"sync_aggregate"`
        ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
        RandaoReveal hexutil.Bytes `json:"randao_reveal,omitempty"`
        Eth1Data *Eth1Data `json:"eth1_data,omitempty"`
        Graffiti hexutil.Bytes `json:"graffiti,omitempty"`
        ProposerSlashings []*ProposerSlashing `json:"proposer_slashings,omitempty"`
        AttesterSlashings []*AttesterSlashing `json:"attester_slashings,omitempty"`
        Attestations []*Attestation `json:"attestations,omitempty"`
        Deposits []*Deposit `json:"deposits,omitempty"`
        VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits,omitempty"`
        SyncAggregate *SyncAggregate `json:"sync_aggregate,omitempty"`
        ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header,omitempty"`
    }{
        RandaoReveal: b.RandaoReveal,
        Eth1Data: &Eth1Data{b.BlindedBeaconBlockBodyBellatrix.Eth1Data},
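Note: throughout these marshalers, 64-bit numbers (slots, gas limits, epochs, deposit counts) are rendered as decimal strings, either via fmt.Sprintf("%d", ...) or via the Uint64String text marshaler named in one of the hunk headers. A standalone sketch of that convention; the type name matches the diff, but the implementation and surrounding code here are illustrative only:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// Uint64String marshals a uint64 as a decimal string, which encoding/json
// then emits as a quoted JSON string for struct fields of this type.
type Uint64String uint64

func (s Uint64String) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("%d", s)), nil
}

func (s *Uint64String) UnmarshalText(b []byte) error {
	v, err := strconv.ParseUint(string(b), 10, 64)
	if err != nil {
		return err
	}
	*s = Uint64String(v)
	return nil
}

// payloadSketch is a hypothetical struct using the same tag style as the diff.
type payloadSketch struct {
	GasLimit  Uint64String `json:"gas_limit,omitempty"`
	Timestamp Uint64String `json:"timestamp,omitempty"`
}

func main() {
	out, _ := json.Marshal(payloadSketch{GasLimit: 23, Timestamp: 42})
	fmt.Println(string(out)) // {"gas_limit":"23","timestamp":"42"}
}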
@@ -53,7 +53,6 @@ go_library(
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/blstoexec:go_default_library",
        "//beacon-chain/operations/slashings:go_default_library",
        "//beacon-chain/operations/voluntaryexits:go_default_library",
        "//beacon-chain/p2p:go_default_library",

@@ -64,7 +63,6 @@ go_library(
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/payload-attribute:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
@@ -181,7 +181,7 @@ func (s *Service) HeadState(ctx context.Context) (state.BeaconState, error) {
    span.AddAttributes(trace.BoolAttribute("cache_hit", ok))

    if ok {
        return s.headState(ctx)
        return s.headState(ctx), nil
    }

    return s.cfg.StateGen.StateByRoot(ctx, s.headRoot())

@@ -195,11 +195,7 @@ func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch types.Epoch)
    if !s.hasHeadState() {
        return []types.ValidatorIndex{}, nil
    }
    hs, err := s.headState(ctx)
    if err != nil {
        return nil, err
    }
    return helpers.ActiveValidatorIndices(ctx, hs, epoch)
    return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
}

// HeadGenesisValidatorsRoot returns genesis validators root of the head state.
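Note: the hunks above change the cache-hit path of HeadState and HeadValidatorsIndices because, on one side of the diff, the internal headState helper no longer returns an error. A minimal sketch of that refactor with hypothetical types, not the real Prysm service:

package main

import (
	"context"
	"errors"
	"fmt"
)

// State is a hypothetical stand-in for state.BeaconState.
type State struct{ slot uint64 }

type service struct{ head *State }

// headState mirrors the error-free shape: copying the cached head cannot fail,
// so the signature drops the error entirely.
func (s *service) headState(_ context.Context) *State {
	c := *s.head // copy for immutability, as the original comment describes
	return &c
}

// HeadState is the public accessor; with the error gone from headState,
// the cache-hit path collapses to a single return statement.
func (s *service) HeadState(ctx context.Context) (*State, error) {
	if s.head == nil {
		return nil, errors.New("no head state cached")
	}
	return s.headState(ctx), nil
}

func main() {
	s := &service{head: &State{slot: 7}}
	st, err := s.HeadState(context.Background())
	fmt.Println(st.slot, err) // 7 <nil>
}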
@@ -198,11 +198,7 @@ func TestHeadState_CanRetrieve(t *testing.T) {
    c.head = &head{state: s}
    headState, err := c.HeadState(context.Background())
    require.NoError(t, err)
    s1, err := headState.ToProtoUnsafe()
    require.NoError(t, err)
    s2, err := s.ToProtoUnsafe()
    require.NoError(t, err)
    assert.DeepEqual(t, s1, s2, "Incorrect head state received")
    assert.DeepEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Incorrect head state received")
}

func TestGenesisTime_CanRetrieve(t *testing.T) {
@@ -19,6 +19,8 @@ var (
    errInvalidNilSummary = errors.New("nil summary returned from the DB")
    // errWrongBlockCount is returned when the wrong number of blocks or block roots is used
    errWrongBlockCount = errors.New("wrong number of blocks or block roots")
    // block is not a valid optimistic candidate block
    errNotOptimisticCandidate = errors.New("block is not suitable for optimistic sync")
    // errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
    errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")
    // errNilStateFromStategen is returned when a nil state is returned from the state generator.
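Note: the hunk above adds the errWrongBlockCount sentinel alongside the existing package-level errors. A short sketch of how such a sentinel is typically consumed with errors.Is after wrapping; the saveBlocks helper is hypothetical:

package main

import (
	"errors"
	"fmt"
)

// Sentinel error in the style of the hunk above; the name matches the diff,
// the rest of this example is illustrative only.
var errWrongBlockCount = errors.New("wrong number of blocks or block roots")

// saveBlocks is a hypothetical caller that wraps the sentinel with context.
func saveBlocks(blocks, roots int) error {
	if blocks != roots {
		return fmt.Errorf("saving batch: %w", errWrongBlockCount)
	}
	return nil
}

func main() {
	err := saveBlocks(3, 2)
	fmt.Println(errors.Is(err, errWrongBlockCount)) // true
}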
@@ -15,11 +15,9 @@ import (
    "github.com/prysmaticlabs/prysm/v3/config/params"
    consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
    payloadattribute "github.com/prysmaticlabs/prysm/v3/consensus-types/payload-attribute"
    types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
    enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
    "github.com/prysmaticlabs/prysm/v3/runtime/version"
    "github.com/prysmaticlabs/prysm/v3/time/slots"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"

@@ -69,7 +67,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
    }

    nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
    hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
    hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
    if err != nil {
        log.WithError(err).Error("Could not get head payload attribute")
        return nil, nil
    }

    payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
    if err != nil {

@@ -148,8 +150,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
        log.WithError(err).Error("Could not set head root to valid")
        return nil, nil
    }
    // If the forkchoice update call has an attribute, update the proposer payload ID cache.
    if hasAttr && payloadID != nil {
    if hasAttr && payloadID != nil { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
        var pId [8]byte
        copy(pId[:], payloadID[:])
        s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)

@@ -182,7 +183,7 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
// notifyNewPayload signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
    postStateHeader interfaces.ExecutionDataHeader, blk interfaces.SignedBeaconBlock) (bool, error) {
    postStateHeader interfaces.ExecutionData, blk interfaces.SignedBeaconBlock) (bool, error) {
    ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
    defer span.End()
@@ -250,29 +251,22 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,

// getPayloadAttributes returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, payloadattribute.Attributer, types.ValidatorIndex) {
    emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, *enginev1.PayloadAttributes, types.ValidatorIndex, error) {
    // Root is `[32]byte{}` since we are retrieving proposer ID of a given slot. During insertion at assignment the root was not known.
    proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
    if !ok { // There's no need to build attribute if there is no proposer for slot.
        return false, emptyAttri, 0
        return false, nil, 0, nil
    }

    // Get previous randao.
    st, err := st.Copy()
    st = st.Copy()
    st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
    if err != nil {
        log.WithError(err).Error("Could not copy state")
        return false, emptyAttri, 0
    }
    st, err = transition.ProcessSlotsIfPossible(ctx, st, slot)
    if err != nil {
        log.WithError(err).Error("Could not process slots to get payload attribute")
        return false, emptyAttri, 0
        return false, nil, 0, err
    }
    prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
    if err != nil {
        log.WithError(err).Error("Could not get randao mix to get payload attribute")
        return false, emptyAttri, 0
        return false, nil, 0, nil
    }

    // Get fee recipient.

@@ -290,8 +284,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
            "Please refer to our documentation for instructions")
    }
    case err != nil:
        log.WithError(err).Error("Could not get fee recipient to get payload attribute")
        return false, emptyAttri, 0
        return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")
    default:
        feeRecipient = recipient
    }

@@ -299,44 +292,14 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
    // Get timestamp.
    t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
    if err != nil {
        log.WithError(err).Error("Could not get timestamp to get payload attribute")
        return false, emptyAttri, 0
        return false, nil, 0, err
    }

    var attr payloadattribute.Attributer
    switch st.Version() {
    case version.Capella:
        withdrawals, err := st.ExpectedWithdrawals()
        if err != nil {
            log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
            return false, emptyAttri, 0
        }
        attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
            Timestamp: uint64(t.Unix()),
            PrevRandao: prevRando,
            SuggestedFeeRecipient: feeRecipient.Bytes(),
            Withdrawals: withdrawals,
        })
        if err != nil {
            log.WithError(err).Error("Could not get payload attribute")
            return false, emptyAttri, 0
        }
    case version.Bellatrix:
        attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
            Timestamp: uint64(t.Unix()),
            PrevRandao: prevRando,
            SuggestedFeeRecipient: feeRecipient.Bytes(),
        })
        if err != nil {
            log.WithError(err).Error("Could not get payload attribute")
            return false, emptyAttri, 0
        }
    default:
        log.WithField("version", st.Version()).Error("Could not get payload attribute due to unknown state version")
        return false, emptyAttri, 0
    attr := &enginev1.PayloadAttributes{
        Timestamp: uint64(t.Unix()),
        PrevRandao: prevRando,
        SuggestedFeeRecipient: feeRecipient.Bytes(),
    }

    return true, attr, proposerID
    return true, attr, proposerID, nil
}

// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.
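Note: the two sides of this hunk handle failures inside getPayloadAttribute differently. One logs and hands back an empty attribute; the other returns the error so the caller (notifyForkchoiceUpdate, in an earlier hunk) logs once and decides whether to continue. A compact sketch of the two shapes, with hypothetical names:

package main

import (
	"errors"
	"fmt"
	"log"
)

// Attribute is a hypothetical stand-in for the engine payload attributes.
type Attribute struct{ Timestamp uint64 }

var errNoTimestamp = errors.New("could not compute timestamp")

// logAndSwallow mirrors the "log inside, return empty" shape: callers cannot
// distinguish "no proposer for this slot" from "something actually failed".
func logAndSwallow(fail bool) (bool, *Attribute) {
	if fail {
		log.Println("could not get timestamp to get payload attribute")
		return false, nil
	}
	return true, &Attribute{Timestamp: 42}
}

// propagate mirrors the "return the error" shape: the caller decides whether
// the failure is fatal for the forkchoice update.
func propagate(fail bool) (bool, *Attribute, error) {
	if fail {
		return false, nil, errNoTimestamp
	}
	return true, &Attribute{Timestamp: 42}, nil
}

func main() {
	ok, attr := logAndSwallow(true)
	fmt.Println(ok, attr) // false <nil>, reason only visible in the log

	ok, attr, err := propagate(true)
	fmt.Println(ok, attr, err) // false <nil> could not compute timestamp
}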
@@ -15,7 +15,6 @@ import (
    doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
    forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
    bstate "github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
    state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
    fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v3/config/params"

@@ -31,69 +30,6 @@ import (
    logTest "github.com/sirupsen/logrus/hooks/test"
)

func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
    ctx := context.Background()
    beaconDB := testDB.SetupDB(t)
    altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
    altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
    require.NoError(t, err)
    bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
    bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
    require.NoError(t, err)
    fcs := doublylinkedtree.New()
    opts := []Option{
        WithDatabase(beaconDB),
        WithStateGen(stategen.New(beaconDB, fcs)),
        WithForkChoiceStore(fcs),
        WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
    }
    service, err := NewService(ctx, opts...)
    require.NoError(t, err)
    st, _ := util.DeterministicGenesisState(t, 10)
    service.head = &head{
        state: st,
    }

    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))

    b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
        Body: &ethpb.BeaconBlockBodyBellatrix{
            ExecutionPayload: &v1.ExecutionPayload{},
        },
    })
    require.NoError(t, err)

    pid := &v1.PayloadIDBytes{1}
    service.cfg.ExecutionEngineCaller = &mockExecution.EngineClient{PayloadIDBytes: pid}
    st, _ = util.DeterministicGenesisState(t, 1)
    require.NoError(t, beaconDB.SaveState(ctx, st, bellatrixBlkRoot))
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))

    // Intentionally generate a bad state such that `hash_tree_root` fails during `process_slot`
    s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
    require.NoError(t, err)
    arg := &notifyForkchoiceUpdateArg{
        headState: s,
        headRoot: [32]byte{},
        headBlock: b,
    }

    service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(1, 0, [8]byte{}, [32]byte{})
    got, err := service.notifyForkchoiceUpdate(ctx, arg)
    require.NoError(t, err)
    require.DeepEqual(t, got, pid) // We still get a payload ID even though the state is bad. This means it returns until the end.
}
func Test_NotifyForkchoiceUpdate(t *testing.T) {
    ctx := context.Background()
    beaconDB := testDB.SetupDB(t)

@@ -111,12 +47,11 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
        WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
    }
    service, err := NewService(ctx, opts...)
    require.NoError(t, err)
    st, _ := util.DeterministicGenesisState(t, 10)
    service.head = &head{
        state: st,
    }

    require.NoError(t, err)
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)

@@ -792,8 +727,8 @@ func Test_GetPayloadAttribute(t *testing.T) {
    // Cache miss
    service, err := NewService(ctx, opts...)
    require.NoError(t, err)
    st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
    hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0)
    hasPayload, _, vId, err := service.getPayloadAttribute(ctx, nil, 0)
    require.NoError(t, err)
    require.Equal(t, false, hasPayload)
    require.Equal(t, types.ValidatorIndex(0), vId)

@@ -801,65 +736,24 @@ func Test_GetPayloadAttribute(t *testing.T) {
    suggestedVid := types.ValidatorIndex(1)
    slot := types.Slot(1)
    service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
    st, _ := util.DeterministicGenesisState(t, 1)
    hook := logTest.NewGlobal()
    hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot)
    hasPayload, attr, vId, err := service.getPayloadAttribute(ctx, st, slot)
    require.NoError(t, err)
    require.Equal(t, true, hasPayload)
    require.Equal(t, suggestedVid, vId)
    require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
    require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
    require.LogsContain(t, hook, "Fee recipient is currently using the burn address")

    // Cache hit, advance state, has fee recipient
    suggestedAddr := common.HexToAddress("123")
    require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
    service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
    hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot)
    hasPayload, attr, vId, err = service.getPayloadAttribute(ctx, st, slot)
    require.NoError(t, err)
    require.Equal(t, true, hasPayload)
    require.Equal(t, suggestedVid, vId)
    require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
}

func Test_GetPayloadAttributeV2(t *testing.T) {
    ctx := context.Background()
    beaconDB := testDB.SetupDB(t)
    opts := []Option{
        WithDatabase(beaconDB),
        WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
        WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
    }

    // Cache miss
    service, err := NewService(ctx, opts...)
    require.NoError(t, err)
    st, _ := util.DeterministicGenesisStateCapella(t, 1)
    hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0)
    require.Equal(t, false, hasPayload)
    require.Equal(t, types.ValidatorIndex(0), vId)

    // Cache hit, advance state, no fee recipient
    suggestedVid := types.ValidatorIndex(1)
    slot := types.Slot(1)
    service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
    hook := logTest.NewGlobal()
    hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot)
    require.Equal(t, true, hasPayload)
    require.Equal(t, suggestedVid, vId)
    require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
    require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
    a, err := attr.Withdrawals()
    require.NoError(t, err)
    require.Equal(t, 0, len(a))

    // Cache hit, advance state, has fee recipient
    suggestedAddr := common.HexToAddress("123")
    require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
    service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
    hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot)
    require.Equal(t, true, hasPayload)
    require.Equal(t, suggestedVid, vId)
    require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
    a, err = attr.Withdrawals()
    require.NoError(t, err)
    require.Equal(t, 0, len(a))
    require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient))
}

func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
@@ -201,15 +201,11 @@ func (s *Service) setHead(root [32]byte, block interfaces.SignedBeaconBlock, sta
    if err != nil {
        return err
    }
    copiedState, err := state.Copy()
    if err != nil {
        return err
    }
    s.head = &head{
        slot: block.Block().Slot(),
        root: root,
        block: bCp,
        state: copiedState,
        state: state.Copy(),
    }
    return nil
}

@@ -262,7 +258,7 @@ func (s *Service) headBlock() (interfaces.SignedBeaconBlock, error) {
// This returns the head state.
// It does a full copy on head state for immutability.
// This is a lock free version.
func (s *Service) headState(ctx context.Context) (state.BeaconState, error) {
func (s *Service) headState(ctx context.Context) state.BeaconState {
    ctx, span := trace.StartSpan(ctx, "blockChain.headState")
    defer span.End()
@@ -89,13 +89,7 @@ func TestSaveHead_Different(t *testing.T) {
    pb, err := headBlock.Proto()
    require.NoError(t, err)
    assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
    headStateProto, err := headState.ToProto()
    require.NoError(t, err)
    hs, err := service.headState(ctx)
    require.NoError(t, err)
    serviceHeadStateProto, err := hs.ToProto()
    require.NoError(t, err)
    assert.DeepSSZEqual(t, headStateProto, serviceHeadStateProto, "Head did not change")
    assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
}

func TestSaveHead_Different_Reorg(t *testing.T) {

@@ -153,13 +147,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
    pb, err := headBlock.Proto()
    require.NoError(t, err)
    assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
    headStateProto, err := headState.ToProto()
    require.NoError(t, err)
    hs, err := service.headState(ctx)
    require.NoError(t, err)
    serviceHeadStateProto, err := hs.ToProto()
    require.NoError(t, err)
    assert.DeepSSZEqual(t, headStateProto, serviceHeadStateProto, "Head did not change")
    assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
    require.LogsContain(t, hook, "Chain reorg occurred")
    require.LogsContain(t, hook, "distance=1")
    require.LogsContain(t, hook, "depth=1")

@@ -513,7 +501,7 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {

    bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
    require.NoError(t, beaconDB.SaveState(ctx, bellatrixState, bellatrixBlkRoot))
    require.NoError(t, service.cfg.StateGen.SaveFinalizedState(0, bellatrixBlkRoot, bellatrixState))
    service.cfg.StateGen.SaveFinalizedState(0, bellatrixBlkRoot, bellatrixState)

    headRoot := service.headRoot()
    require.Equal(t, [32]byte{}, headRoot)
@@ -117,24 +117,12 @@ func logPayload(block interfaces.BeaconBlock) error {
        return errors.New("gas limit should not be 0")
    }
    gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())
    fields := logrus.Fields{

    log.WithFields(logrus.Fields{
        "blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
        "parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
        "blockNumber": payload.BlockNumber,
        "gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
    }
    if block.Version() >= version.Capella {
        withdrawals, err := payload.Withdrawals()
        if err != nil {
            return errors.Wrap(err, "could not get withdrawals")
        }
        fields["withdrawals"] = len(withdrawals)
        changes, err := block.Body().BLSToExecutionChanges()
        if err != nil {
            return errors.Wrap(err, "could not get BLSToExecutionChanges")
        }
        fields["blsToExecutionChanges"] = len(changes)
    }
    log.WithFields(fields).Debug("Synced new payload")
    }).Debug("Synced new payload")
    return nil
}
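Note: one side of this hunk logs everything inline in a single WithFields call, while the other first builds a logrus.Fields map so that Capella-only fields (withdrawals, BLS-to-execution changes) can be attached conditionally before a single Debug line is emitted. A small sketch of that conditional-fields pattern; the values are made up:

package main

import (
	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.SetLevel(logrus.DebugLevel)

	// Base fields, as in the unconditional part of the hunk.
	fields := logrus.Fields{
		"blockHash":   "0x1234",
		"gasUtilized": "0.42",
	}

	// Hypothetical post-Capella block: extra fields are attached only when the
	// fork supports them, then everything is emitted with one call.
	isCapella := true
	if isCapella {
		fields["withdrawals"] = 2
		fields["blsToExecutionChanges"] = 1
	}

	log.WithFields(fields).Debug("Synced new payload")
}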
@@ -328,7 +328,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
        if err != nil {
            return err
        }
    case version.Altair, version.Bellatrix, version.Capella:
    case version.Altair, version.Bellatrix:
        v, b, err = altair.InitializePrecomputeValidators(ctx, headState)
        if err != nil {
            return err

@@ -338,11 +338,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
            return err
        }
    default:
        st, err := headState.ToProtoUnsafe()
        if err != nil {
            return err
        }
        return errors.Errorf("invalid state type provided: %T", st)
        return errors.Errorf("invalid state type provided: %T", headState.ToProtoUnsafe())
    }
    prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
    prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))
@@ -9,7 +9,6 @@ import (
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"

@@ -100,14 +99,6 @@ func WithSlashingPool(p slashings.PoolManager) Option {
    }
}

// WithBLSToExecPool to keep track of BLS to Execution address changes.
func WithBLSToExecPool(p blstoexec.PoolManager) Option {
    return func(s *Service) error {
        s.cfg.BLSToExecPool = p
        return nil
    }
}

// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
    return func(s *Service) error {
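Note: WithBLSToExecPool follows the same functional-option shape as the other With… helpers in this file: a constructor returns a closure that mutates the service config and may return an error. A minimal standalone sketch of that pattern; names are illustrative, not Prysm's:

package main

import "fmt"

// config and service stand in for the real service configuration.
type config struct {
	pool string
}

type service struct {
	cfg config
}

// option is the functional-option type: a closure applied at construction time.
type option func(*service) error

func withPool(p string) option {
	return func(s *service) error {
		s.cfg.pool = p
		return nil
	}
}

func newService(opts ...option) (*service, error) {
	s := &service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := newService(withPool("blstoexec"))
	fmt.Println(s.cfg.pool, err) // blstoexec <nil>
}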
@@ -146,8 +146,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
    att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
    require.NoError(t, err)
    tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
    copied, err := genesisState.Copy()
    require.NoError(t, err)
    copied := genesisState.Copy()
    copied, err = transition.ProcessSlots(ctx, copied, 1)
    require.NoError(t, err)
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))

@@ -257,11 +256,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {

    cached, err = service.checkpointStateCache.StateByCheckpoint(newCheckpoint)
    require.NoError(t, err)
    s1, err := returned.ToProtoUnsafe()
    require.NoError(t, err)
    s2, err := cached.ToProtoUnsafe()
    require.NoError(t, err)
    require.DeepSSZEqual(t, s1, s2)
    require.DeepSSZEqual(t, returned.ToProtoUnsafe(), cached.ToProtoUnsafe())
}

func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
@@ -146,10 +146,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
    if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
        return errors.Wrap(err, "could not handle block's attestations")
    }
    if err := s.handleBlockBLSToExecChanges(signed.Block()); err != nil {
        return errors.Wrap(err, "could not handle block's BLSToExecutionChanges")
    }

    s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
    if isValidPayload {
        if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {

@@ -282,11 +278,11 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
    return nil
}

func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionDataHeader, error) {
func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionData, error) {
    if st == nil {
        return 0, nil, errors.New("nil state")
    }
    var preStateHeader interfaces.ExecutionDataHeader
    var preStateHeader interfaces.ExecutionData
    var err error
    preStateVersion := st.Version()
    switch preStateVersion {

@@ -344,7 +340,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
    }
    type versionAndHeader struct {
        version int
        header interfaces.ExecutionDataHeader
        header interfaces.ExecutionData
    }
    preVersionAndHeaders := make([]*versionAndHeader, len(blks))
    postVersionAndHeaders := make([]*versionAndHeader, len(blks))

@@ -366,11 +362,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
        }
        // Save potential boundary states.
        if slots.IsEpochStart(preState.Slot()) {
            st, err := preState.Copy()
            if err != nil {
                return err
            }
            boundaries[blockRoots[i]] = st
            boundaries[blockRoots[i]] = preState.Copy()
        }
        jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
        fCheckpoints[i] = preState.FinalizedCheckpoint()

@@ -480,11 +472,8 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
    defer span.End()

    if postState.Slot()+1 == s.nextEpochBoundarySlot {
        copied, err := postState.Copy()
        if err != nil {
            return err
        }
        copied, err = transition.ProcessSlots(ctx, copied, copied.Slot()+1)
        copied := postState.Copy()
        copied, err := transition.ProcessSlots(ctx, copied, copied.Slot()+1)
        if err != nil {
            return err
        }

@@ -566,22 +555,6 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Be
    return nil
}

func (s *Service) handleBlockBLSToExecChanges(blk interfaces.BeaconBlock) error {
    if blk.Version() < version.Capella {
        return nil
    }
    changes, err := blk.Body().BLSToExecutionChanges()
    if err != nil {
        return errors.Wrap(err, "could not get BLSToExecutionChanges")
    }
    for _, change := range changes {
        if err := s.cfg.BLSToExecPool.MarkIncluded(change); err != nil {
            return errors.Wrap(err, "could not mark BLSToExecutionChange as included")
        }
    }
    return nil
}

// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {

@@ -634,7 +607,7 @@ func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b
}

// validateMergeTransitionBlock validates the merge transition block.
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader interfaces.ExecutionDataHeader, blk interfaces.SignedBeaconBlock) error {
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader interfaces.ExecutionData, blk interfaces.SignedBeaconBlock) error {
    // Skip validation if block is older than Bellatrix.
    if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
        return nil

@@ -661,7 +634,7 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion

    // Skip validation if the block is not a merge transition block.
    // To reach here. The payload must be non-empty. If the state header is empty then it's at transition.
    empty, err := consensusblocks.IsEmptyExecutionDataHeader(stateHeader)
    empty, err := consensusblocks.IsEmptyExecutionData(stateHeader)
    if err != nil {
        return err
    }

@@ -725,12 +698,8 @@ func (s *Service) fillMissingBlockPayloadId(ctx context.Context, ti time.Time) e
    if err != nil {
        return err
    } else {
        hs, err := s.headState(ctx)
        if err != nil {
            return err
        }
        if _, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
            headState: hs,
            headState: s.headState(ctx),
            headRoot: s.headRoot(),
            headBlock: headBlock.Block(),
        }); err != nil {
@@ -26,7 +26,6 @@ import (
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
@@ -66,9 +65,7 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
copiedSt, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copiedSt, validGenesisRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
|
||||
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
|
||||
require.NoError(t, err)
|
||||
random := util.NewBeaconBlock()
|
||||
@@ -78,23 +75,11 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
randomParentRoot, err := random.Block.HashTreeRoot()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
|
||||
copiedSt1, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copiedSt1, randomParentRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
|
||||
randomParentRoot2 := roots[1]
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
|
||||
copiedSt2, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copiedSt2, bytesutil.ToBytes32(randomParentRoot2)))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
|
||||
|
||||
c1, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
c2, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
c3, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
c4, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
name string
|
||||
blk *ethpb.SignedBeaconBlock
|
||||
@@ -105,7 +90,7 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
{
|
||||
name: "parent block root does not have a state",
|
||||
blk: util.NewBeaconBlock(),
|
||||
s: c1,
|
||||
s: st.Copy(),
|
||||
wantErrString: "could not reconstruct parent state",
|
||||
},
|
||||
{
|
||||
@@ -116,7 +101,7 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
b.Block.Slot = params.BeaconConfig().FarFutureSlot
|
||||
return b
|
||||
}(),
|
||||
s: c2,
|
||||
s: st.Copy(),
|
||||
wantErrString: "is in the far distant future",
|
||||
},
|
||||
{
|
||||
@@ -126,7 +111,7 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
b.Block.ParentRoot = randomParentRoot[:]
|
||||
return b
|
||||
}(),
|
||||
s: c3,
|
||||
s: st.Copy(),
|
||||
wantErrString: "is not a descendant of the current finalized block",
|
||||
},
|
||||
{
|
||||
@@ -137,7 +122,7 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
b.Block.ParentRoot = randomParentRoot2
|
||||
return b
|
||||
}(),
|
||||
s: c4,
|
||||
s: st.Copy(),
|
||||
wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
|
||||
},
|
||||
}
|
||||
@@ -171,8 +156,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
bState, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
bState := st.Copy()
|
||||
|
||||
var blks []interfaces.SignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
@@ -215,8 +199,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
bState, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
bState := st.Copy()
|
||||
|
||||
var blks []interfaces.SignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
@@ -386,9 +369,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
copied, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, validGenesisRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
|
||||
|
||||
// Define a tree branch, slot 63 <- 64 <- 65
|
||||
b63 := util.NewBeaconBlock()
|
||||
@@ -447,9 +428,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
copied, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, validGenesisRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
|
||||
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -543,25 +522,17 @@ func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byt
|
||||
if err := beaconDB.SaveBlock(context.Background(), wsb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copied1, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
if err := beaconDB.SaveState(context.Background(), copied1, bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
|
||||
if err := beaconDB.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
|
||||
return nil, errors.Wrap(err, "could not save state")
|
||||
}
|
||||
}
|
||||
copied2, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
if err := beaconDB.SaveState(context.Background(), copied2, r1); err != nil {
|
||||
if err := beaconDB.SaveState(context.Background(), st.Copy(), r1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copied3, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
if err := beaconDB.SaveState(context.Background(), copied3, r7); err != nil {
|
||||
if err := beaconDB.SaveState(context.Background(), st.Copy(), r7); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copied4, err := st.Copy()
|
||||
require.NoError(t, err)
|
||||
if err := beaconDB.SaveState(context.Background(), copied4, r8); err != nil {
|
||||
if err := beaconDB.SaveState(context.Background(), st.Copy(), r8); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return [][]byte{r0[:], r1[:], nil, r3[:], r4[:], r5[:], r6[:], r7[:], r8[:]}, nil
|
||||
@@ -852,8 +823,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
|
||||
|
||||
testState, err := gs.Copy()
|
||||
require.NoError(t, err)
|
||||
testState := gs.Copy()
|
||||
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
blk, err := util.GenerateFullBlock(testState, keys, util.DefaultBlockGenConfig(), i)
|
||||
require.NoError(t, err)
|
||||
@@ -902,8 +872,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
gs, keys := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
|
||||
testState, err := gs.Copy()
|
||||
require.NoError(t, err)
|
||||
testState := gs.Copy()
|
||||
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
blk, err := util.GenerateFullBlock(testState, keys, util.DefaultBlockGenConfig(), i)
|
||||
require.NoError(t, err)
|
||||
@@ -1005,8 +974,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
|
||||
gs, keys := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
testState, err := gs.Copy()
|
||||
require.NoError(t, err)
|
||||
testState := gs.Copy()
|
||||
for i := types.Slot(1); i < params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
blk, err := util.GenerateFullBlock(testState, keys, util.DefaultBlockGenConfig(), i)
|
||||
require.NoError(t, err)
|
||||
@@ -1031,8 +999,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
|
||||
|
||||
gs, _ := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
gs, err = gs.Copy()
|
||||
require.NoError(t, err)
|
||||
gs = gs.Copy()
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 10}))
|
||||
assert.NoError(t, gs.SetEth1DepositIndex(8))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
|
||||
@@ -1066,13 +1033,11 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
|
||||
gs, _ := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
gs, err = gs.Copy()
|
||||
require.NoError(t, err)
|
||||
gs = gs.Copy()
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 7}))
|
||||
assert.NoError(t, gs.SetEth1DepositIndex(6))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
|
||||
gs2, err := gs.Copy()
|
||||
require.NoError(t, err)
|
||||
gs2 := gs.Copy()
|
||||
assert.NoError(t, gs2.SetEth1Data(ðpb.Eth1Data{DepositCount: 15}))
|
||||
assert.NoError(t, gs2.SetEth1DepositIndex(13))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}, gs2))
|
||||
@@ -2339,65 +2304,6 @@ func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
|
||||
require.NoError(t, service.fillMissingBlockPayloadId(ctx, time.Unix(int64(params.BeaconConfig().SecondsPerSlot/2), 0)))
|
||||
}
|
||||
|
||||
func TestHandleBBlockBLSToExecutionChanges(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fc := doublylinkedtree.New()
|
||||
pool := blstoexec.NewPool()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB, fc)),
|
||||
WithForkChoiceStore(fc),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
WithBLSToExecPool(pool),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("pre Capella block", func(t *testing.T) {
|
||||
body := ðpb.BeaconBlockBodyBellatrix{}
|
||||
pbb := ðpb.BeaconBlockBellatrix{
|
||||
Body: body,
|
||||
}
|
||||
blk, err := consensusblocks.NewBeaconBlock(pbb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
|
||||
})
|
||||
|
||||
t.Run("Post Capella no changes", func(t *testing.T) {
|
||||
body := ðpb.BeaconBlockBodyCapella{}
|
||||
pbb := ðpb.BeaconBlockCapella{
|
||||
Body: body,
|
||||
}
|
||||
blk, err := consensusblocks.NewBeaconBlock(pbb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
|
||||
})
|
||||
|
||||
t.Run("Post Capella some changes", func(t *testing.T) {
|
||||
idx := types.ValidatorIndex(123)
|
||||
change := ðpb.BLSToExecutionChange{
|
||||
ValidatorIndex: idx,
|
||||
}
|
||||
signedChange := ðpb.SignedBLSToExecutionChange{
|
||||
Message: change,
|
||||
}
|
||||
body := ðpb.BeaconBlockBodyCapella{
|
||||
BlsToExecutionChanges: []*ethpb.SignedBLSToExecutionChange{signedChange},
|
||||
}
|
||||
pbb := ðpb.BeaconBlockCapella{
|
||||
Body: body,
|
||||
}
|
||||
blk, err := consensusblocks.NewBeaconBlock(pbb)
|
||||
require.NoError(t, err)
|
||||
|
||||
pool.InsertBLSToExecChange(signedChange)
|
||||
require.Equal(t, true, pool.ValidatorExists(idx))
|
||||
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
|
||||
require.Equal(t, false, pool.ValidatorExists(idx))
|
||||
})
|
||||
}
|
||||
|
||||
// Helper function to simulate the block being on time or delayed for proposer
|
||||
// boost. It alters the genesisTime tracked by the store.
|
||||
func driftGenesisTime(s *Service, slot int64, delay int64) {
|
||||
|
||||
@@ -109,8 +109,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
copied, err := genesisState.Copy()
require.NoError(t, err)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
@@ -213,8 +212,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
copied, err := genesisState.Copy()
require.NoError(t, err)
copied := genesisState.Copy()
// Generate a new block for attesters to attest
blk, err := util.GenerateFullBlock(copied, pks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
@@ -271,8 +269,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
copied, err := genesisState.Copy()
require.NoError(t, err)
copied := genesisState.Copy()
// Generate a new block
blk, err := util.GenerateFullBlock(copied, pks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)

@@ -33,7 +33,7 @@ func TestService_ReceiveBlock(t *testing.T) {
assert.NoError(t, err)
return blk
}
//params.SetupTestConfigCleanupWithLock(t)
params.SetupTestConfigCleanupWithLock(t)
bc := params.BeaconConfig().Copy()
bc.ShardCommitteePeriod = 0 // Required for voluntary exits test in reasonable time.
params.OverrideBeaconConfig(bc)

@@ -23,7 +23,6 @@ import (
f "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
@@ -71,7 +70,6 @@ type config struct {
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2p p2p.Broadcaster
MaxRoutines int
StateNotifier statefeed.Notifier
@@ -431,9 +429,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
}

s.originBlockRoot = genesisBlkRoot
if err := s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState); err != nil {
return err
}
s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)

if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
log.WithError(err).Fatal("Could not process genesis block for fork choice")

@@ -83,9 +83,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
srv.Stop()
})
bState, _ := util.DeterministicGenesisState(t, 10)
pbUnsafe, err := bState.ToProtoUnsafe()
require.NoError(t, err)
pbState, err := state_native.ProtobufBeaconStatePhase0(pbUnsafe)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
require.NoError(t, err)
@@ -327,11 +325,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
assert.DeepEqual(t, headBlock, pb, "Head block incorrect")
s, err := c.HeadState(ctx)
require.NoError(t, err)
s1, err := headState.ToProtoUnsafe()
require.NoError(t, err)
s2, err := s.ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, s1, s2, "Head state incorrect")
assert.DeepSSZEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Head state incorrect")
assert.Equal(t, c.HeadSlot(), headBlock.Block.Slot, "Head slot incorrect")
r, err := c.HeadRoot(context.Background())
require.NoError(t, err)
@@ -386,11 +380,7 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, c.StartFromSavedState(headState))
s, err := c.HeadState(ctx)
require.NoError(t, err)
s1, err := headState.ToProtoUnsafe()
require.NoError(t, err)
s2, err := s.ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, s1, s2, "Head state incorrect")
assert.DeepSSZEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Head state incorrect")
assert.Equal(t, genesisRoot, c.originBlockRoot, "Genesis block root incorrect")
pb, err := c.head.block.Proto()
require.NoError(t, err)

20 beacon-chain/cache/checkpoint_state_test.go vendored
@@ -33,13 +33,9 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
s, err = cache.StateByCheckpoint(cp1)
require.NoError(t, err)

pbs1, err := s.ToProtoUnsafe()
pbState1, err := state_native.ProtobufBeaconStatePhase0(s.ToProtoUnsafe())
require.NoError(t, err)
pbState1, err := state_native.ProtobufBeaconStatePhase0(pbs1)
require.NoError(t, err)
pbs2, err := st.ToProtoUnsafe()
require.NoError(t, err)
pbstate, err := state_native.ProtobufBeaconStatePhase0(pbs2)
pbstate, err := state_native.ProtobufBeaconStatePhase0(st.ToProtoUnsafe())
require.NoError(t, err)
if !proto.Equal(pbState1, pbstate) {
t.Error("incorrectly cached state")
@@ -54,19 +50,11 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {

s, err = cache.StateByCheckpoint(cp2)
require.NoError(t, err)
sProto, err := s.ToProto()
require.NoError(t, err)
st2Proto, err := st2.ToProto()
require.NoError(t, err)
assert.DeepEqual(t, st2Proto, sProto, "incorrectly cached state")
assert.DeepEqual(t, st2.ToProto(), s.ToProto(), "incorrectly cached state")

s, err = cache.StateByCheckpoint(cp1)
require.NoError(t, err)
stProto, err := st.ToProto()
require.NoError(t, err)
sProto, err = s.ToProto()
require.NoError(t, err)
assert.DeepEqual(t, stProto, sProto, "incorrectly cached state")
assert.DeepEqual(t, st.ToProto(), s.ToProto(), "incorrectly cached state")
}

func TestCheckpointStateCache_MaxSize(t *testing.T) {

17 beacon-chain/cache/skip_slot_cache.go vendored
@@ -94,11 +94,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
if exists && item != nil {
skipSlotCacheHit.Inc()
span.AddAttributes(trace.BoolAttribute("hit", true))
c, err := item.(state.BeaconState).Copy()
if err != nil {
return nil, err
}
return c, nil
return item.(state.BeaconState).Copy(), nil
}
skipSlotCacheMiss.Inc()
span.AddAttributes(trace.BoolAttribute("hit", false))
@@ -136,15 +132,10 @@ func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) {
}

// Put the response in the cache.
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) error {
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) {
if c.disabled {
return nil
return
}
// Copy state so cached value is not mutated.
cpy, err := state.Copy()
if err != nil {
return err
}
c.cache.Add(r, cpy)
return nil
c.cache.Add(r, state.Copy())
}

8 beacon-chain/cache/skip_slot_cache_test.go vendored
@@ -28,14 +28,10 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
})
require.NoError(t, err)

require.NoError(t, c.Put(ctx, r, s))
c.Put(ctx, r, s)
c.MarkNotInProgress(r)

res, err := c.Get(ctx, r)
require.NoError(t, err)
resProto, err := res.ToProto()
require.NoError(t, err)
sProto, err := s.ToProto()
require.NoError(t, err)
assert.DeepEqual(t, resProto, sProto, "Expected equal protos to return from cache")
assert.DeepEqual(t, res.ToProto(), s.ToProto(), "Expected equal protos to return from cache")
}

@@ -59,7 +59,6 @@ go_test(
"deposit_test.go",
"epoch_precompute_test.go",
"epoch_spec_test.go",
"exports_test.go",
"reward_test.go",
"sync_committee_test.go",
"transition_test.go",

@@ -9,6 +9,7 @@ import (
p2pType "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
@@ -45,7 +46,7 @@ import (
// else:
// decrease_balance(state, participant_index, participant_reward)
func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (state.BeaconState, error) {
s, votedKeys, err := processSyncAggregate(ctx, s, sync)
votedKeys, votedIndices, didntVoteIndices, err := FilterSyncCommitteeVotes(s, sync)
if err != nil {
return nil, errors.Wrap(err, "could not filter sync committee votes")
}
@@ -53,70 +54,50 @@ func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.
if err := VerifySyncCommitteeSig(s, votedKeys, sync.SyncCommitteeSignature); err != nil {
return nil, errors.Wrap(err, "could not verify sync committee signature")
}
return s, nil

return ApplySyncRewardsPenalties(ctx, s, votedIndices, didntVoteIndices)
}

// processSyncAggregate applies all the logic in the spec function `process_sync_aggregate` except
// verifying the BLS signatures. It returns the modified beacons state and the list of validators'
// public keys that voted, for future signature verification.
func processSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (
state.BeaconState,
[]bls.PublicKey,
error) {
// FilterSyncCommitteeVotes filters the validator public keys and indices for the ones that voted and didn't vote.
func FilterSyncCommitteeVotes(s state.BeaconState, sync *ethpb.SyncAggregate) (
votedKeys []bls.PublicKey,
votedIndices []types.ValidatorIndex,
didntVoteIndices []types.ValidatorIndex,
err error) {
currentSyncCommittee, err := s.CurrentSyncCommittee()
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
if currentSyncCommittee == nil {
return nil, nil, errors.New("nil current sync committee in state")
return nil, nil, nil, errors.New("nil current sync committee in state")
}
committeeKeys := currentSyncCommittee.Pubkeys
if sync.SyncCommitteeBits.Len() > uint64(len(committeeKeys)) {
return nil, nil, errors.New("bits length exceeds committee length")
return nil, nil, nil, errors.New("bits length exceeds committee length")
}
votedKeys := make([]bls.PublicKey, 0, len(committeeKeys))
votedKeys = make([]bls.PublicKey, 0, len(committeeKeys))
votedIndices = make([]types.ValidatorIndex, 0, len(committeeKeys))
didntVoteIndices = make([]types.ValidatorIndex, 0) // No allocation. Expect most votes.

activeBalance, err := helpers.TotalActiveBalance(s)
if err != nil {
return nil, nil, err
}
proposerReward, participantReward, err := SyncRewards(activeBalance)
if err != nil {
return nil, nil, err
}
proposerIndex, err := helpers.BeaconProposerIndex(ctx, s)
if err != nil {
return nil, nil, err
}

earnedProposerReward := uint64(0)
for i := uint64(0); i < sync.SyncCommitteeBits.Len(); i++ {
vIdx, exists := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(committeeKeys[i]))
// Impossible scenario.
if !exists {
return nil, nil, errors.New("validator public key does not exist in state")
return nil, nil, nil, errors.New("validator public key does not exist in state")
}

if sync.SyncCommitteeBits.BitAt(i) {
pubKey, err := bls.PublicKeyFromBytes(committeeKeys[i])
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
votedKeys = append(votedKeys, pubKey)
if err := helpers.IncreaseBalance(s, vIdx, participantReward); err != nil {
return nil, nil, err
}
earnedProposerReward += proposerReward
votedIndices = append(votedIndices, vIdx)
} else {
if err := helpers.DecreaseBalance(s, vIdx, participantReward); err != nil {
return nil, nil, err
}
didntVoteIndices = append(didntVoteIndices, vIdx)
}
}
if err := helpers.IncreaseBalance(s, proposerIndex, earnedProposerReward); err != nil {
return nil, nil, err
}
return s, votedKeys, err
return
}

// VerifySyncCommitteeSig verifies sync committee signature `syncSig` is valid with respect to public keys `syncKeys`.
@@ -145,6 +126,43 @@ func VerifySyncCommitteeSig(s state.BeaconState, syncKeys []bls.PublicKey, syncS
return nil
}

// ApplySyncRewardsPenalties applies rewards and penalties for proposer and sync committee participants.
func ApplySyncRewardsPenalties(ctx context.Context, s state.BeaconState, votedIndices, didntVoteIndices []types.ValidatorIndex) (state.BeaconState, error) {
activeBalance, err := helpers.TotalActiveBalance(s)
if err != nil {
return nil, err
}
proposerReward, participantReward, err := SyncRewards(activeBalance)
if err != nil {
return nil, err
}

// Apply sync committee rewards.
earnedProposerReward := uint64(0)
for _, index := range votedIndices {
if err := helpers.IncreaseBalance(s, index, participantReward); err != nil {
return nil, err
}
earnedProposerReward += proposerReward
}
// Apply proposer rewards.
proposerIndex, err := helpers.BeaconProposerIndex(ctx, s)
if err != nil {
return nil, err
}
if err := helpers.IncreaseBalance(s, proposerIndex, earnedProposerReward); err != nil {
return nil, err
}
// Apply sync committee penalties.
for _, index := range didntVoteIndices {
if err := helpers.DecreaseBalance(s, index, participantReward); err != nil {
return nil, err
}
}

return s, nil
}

// SyncRewards returns the proposer reward and the sync participant reward given the total active balance in state.
func SyncRewards(activeBalance uint64) (proposerReward, participantReward uint64, err error) {
cfg := params.BeaconConfig()

@@ -168,36 +168,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// This is a regression test #11696
|
||||
func TestProcessSyncCommittee_DontPrecompute(t *testing.T) {
|
||||
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
require.NoError(t, beaconState.SetSlot(1))
|
||||
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
committeeKeys := committee.Pubkeys
|
||||
committeeKeys[1] = committeeKeys[0]
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
|
||||
idx, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(committeeKeys[0]))
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
syncBits := bitfield.NewBitvector512()
|
||||
for i := range syncBits {
|
||||
syncBits[i] = 0xFF
|
||||
}
|
||||
syncBits.SetBitAt(0, false)
|
||||
syncAggregate := ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: syncBits,
|
||||
}
|
||||
require.NoError(t, beaconState.UpdateBalancesAtIndex(idx, 0))
|
||||
st, votedKeys, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 511, len(votedKeys))
|
||||
require.DeepEqual(t, committeeKeys[0], votedKeys[0].Marshal())
|
||||
balances := st.Balances()
|
||||
require.Equal(t, uint64(988), balances[idx])
|
||||
}
|
||||
|
||||
func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
|
||||
func TestProcessSyncCommittee_FilterSyncCommitteeVotes(t *testing.T) {
|
||||
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
require.NoError(t, beaconState.SetSlot(1))
|
||||
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
|
||||
@@ -212,40 +183,25 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
|
||||
SyncCommitteeBits: syncBits,
|
||||
}
|
||||
|
||||
st, votedKeys, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
|
||||
votedKeys, votedIndices, didntVoteIndices, err := altair.FilterSyncCommitteeVotes(beaconState, syncAggregate)
|
||||
require.NoError(t, err)
|
||||
votedMap := make(map[[fieldparams.BLSPubkeyLength]byte]bool)
|
||||
for _, key := range votedKeys {
|
||||
votedMap[bytesutil.ToBytes48(key.Marshal())] = true
|
||||
}
|
||||
require.Equal(t, int(syncBits.Len()/2), len(votedKeys))
|
||||
|
||||
currentSyncCommittee, err := st.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
committeeKeys := currentSyncCommittee.Pubkeys
|
||||
balances := st.Balances()
|
||||
|
||||
proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int(syncBits.Len()/2), len(votedIndices))
|
||||
require.Equal(t, int(syncBits.Len()/2), len(didntVoteIndices))
|
||||
|
||||
for i := 0; i < len(syncBits); i++ {
|
||||
if syncBits.BitAt(uint64(i)) {
|
||||
pk := bytesutil.ToBytes48(committeeKeys[i])
|
||||
pk := beaconState.PubkeyAtIndex(votedIndices[i])
|
||||
require.DeepEqual(t, true, votedMap[pk])
|
||||
idx, ok := st.ValidatorIndexByPubkey(pk)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, uint64(32000000988), balances[idx])
|
||||
} else {
|
||||
pk := bytesutil.ToBytes48(committeeKeys[i])
|
||||
pk := beaconState.PubkeyAtIndex(didntVoteIndices[i])
|
||||
require.DeepEqual(t, false, votedMap[pk])
|
||||
idx, ok := st.ValidatorIndexByPubkey(pk)
|
||||
require.Equal(t, true, ok)
|
||||
if idx != proposerIndex {
|
||||
require.Equal(t, uint64(31999999012), balances[idx])
|
||||
}
|
||||
}
|
||||
}
|
||||
require.Equal(t, uint64(32000035108), balances[proposerIndex])
|
||||
}
|
||||
|
||||
func Test_VerifySyncCommitteeSig(t *testing.T) {
|
||||
@@ -284,6 +240,22 @@ func Test_VerifySyncCommitteeSig(t *testing.T) {
|
||||
require.NoError(t, altair.VerifySyncCommitteeSig(beaconState, pks, aggregatedSig))
|
||||
}
|
||||
|
||||
func Test_ApplySyncRewardsPenalties(t *testing.T) {
|
||||
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
beaconState, err := altair.ApplySyncRewardsPenalties(context.Background(), beaconState,
|
||||
[]types.ValidatorIndex{0, 1}, // voted
|
||||
[]types.ValidatorIndex{2, 3}) // didn't vote
|
||||
require.NoError(t, err)
|
||||
balances := beaconState.Balances()
|
||||
require.Equal(t, uint64(32000000988), balances[0])
|
||||
require.Equal(t, balances[0], balances[1])
|
||||
require.Equal(t, uint64(31999999012), balances[2])
|
||||
require.Equal(t, balances[2], balances[3])
|
||||
proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(32000000282), balances[proposerIndex])
|
||||
}
|
||||
|
||||
func Test_SyncRewards(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -307,8 +307,7 @@ func TestProcessRewardsAndPenaltiesPrecompute_InactivityLeak(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
sCopy, err := s.Copy()
require.NoError(t, err)
sCopy := s.Copy()
s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
require.NoError(t, err)

@@ -1,3 +0,0 @@
package altair

var ProcessSyncAggregateEported = processSyncAggregate
@@ -72,8 +72,7 @@ func TestTranslateParticipation(t *testing.T) {

func TestUpgradeToAltair(t *testing.T) {
st, _ := util.DeterministicGenesisState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
preForkState, err := st.Copy()
require.NoError(t, err)
preForkState := st.Copy()
aState, err := altair.UpgradeToAltair(context.Background(), st)
require.NoError(t, err)

@@ -41,8 +41,8 @@ go_library(
"//contracts/deposit:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//crypto/hash/htr:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
@@ -70,7 +70,6 @@ go_test(
"deposit_test.go",
"eth1_data_test.go",
"exit_test.go",
"exports_test.go",
"genesis_test.go",
"header_test.go",
"payload_test.go",
@@ -98,8 +97,7 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//crypto/hash/htr:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/engine/v1:go_default_library",

@@ -1,3 +0,0 @@
package blocks

var ProcessBLSToExecutionChange = processBLSToExecutionChange
@@ -41,7 +41,7 @@ func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
if err != nil {
return false, err
}
isEmpty, err := blocks.IsEmptyExecutionDataHeader(h)
isEmpty, err := blocks.IsEmptyExecutionData(h)
if err != nil {
return false, err
}
@@ -87,6 +87,9 @@ func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (
if IsPreBellatrixVersion(st.Version()) {
return false, nil
}
if st.Version() > version.Bellatrix {
return true, nil
}
header, err := st.LatestExecutionPayloadHeader()
if err != nil {
return false, err
@@ -96,8 +99,8 @@ func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (

// IsExecutionEnabledUsingHeader returns true if the execution is enabled using post processed payload header and block body.
// This is an optimized version of IsExecutionEnabled where beacon state is not required as an argument.
func IsExecutionEnabledUsingHeader(header interfaces.ExecutionDataHeader, body interfaces.BeaconBlockBody) (bool, error) {
isEmpty, err := blocks.IsEmptyExecutionDataHeader(header)
func IsExecutionEnabledUsingHeader(header interfaces.ExecutionData, body interfaces.BeaconBlockBody) (bool, error) {
isEmpty, err := blocks.IsEmptyExecutionData(header)
if err != nil {
return false, err
}
@@ -120,7 +123,7 @@ func IsPreBellatrixVersion(v int) bool {
// # Verify consistency of the parent hash with respect to the previous execution payload header
// if is_merge_complete(state):
// assert payload.parent_hash == state.latest_execution_payload_header.block_hash
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.ExecutionDataHeader) error {
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.ExecutionData) error {
complete, err := IsMergeTransitionComplete(st)
if err != nil {
return err
@@ -223,7 +226,7 @@ func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (sta
}

// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interfaces.ExecutionDataHeader) error {
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interfaces.ExecutionData) error {
// Skip validation if the state is not merge compatible.
complete, err := IsMergeTransitionComplete(st)
if err != nil {
@@ -244,7 +247,7 @@ func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interf
}

// ValidatePayloadHeader validates the payload header.
func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionDataHeader) error {
func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData) error {
// Validate header's random mix matches with state in current epoch
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
@@ -266,7 +269,7 @@ func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData
}

// ProcessPayloadHeader processes the payload header.
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionDataHeader) (state.BeaconState, error) {
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData) (state.BeaconState, error) {
if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
return nil, err
}

@@ -253,7 +253,7 @@ func Test_IsExecutionBlockCapella(t *testing.T) {
require.NoError(t, err)
got, err := blocks.IsExecutionBlock(wrappedBlock.Body())
require.NoError(t, err)
require.Equal(t, false, got)
require.Equal(t, true, got)
}

func Test_IsExecutionEnabled(t *testing.T) {
@@ -349,6 +349,15 @@ func Test_IsExecutionEnabled(t *testing.T) {
})
}
}
func Test_IsExecutionEnabledCapella(t *testing.T) {
st, _ := util.DeterministicGenesisStateCapella(t, 1)
blk := util.NewBeaconBlockCapella()
body, err := consensusblocks.NewBeaconBlockBody(blk.Block.Body)
require.NoError(t, err)
got, err := blocks.IsExecutionEnabled(st, body)
require.NoError(t, err)
require.Equal(t, true, got)
}

func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
tests := []struct {
@@ -737,8 +746,7 @@ func Test_ValidatePayloadHeader(t *testing.T) {

func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
emptySt, err := st.Copy()
require.NoError(t, err)
emptySt := st.Copy()
wrappedHeader, err := consensusblocks.WrappedExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{BlockHash: []byte{'a'}})
require.NoError(t, err)
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))

@@ -8,43 +8,17 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
"github.com/prysmaticlabs/prysm/v3/crypto/hash/htr"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)

const executionToBLSPadding = 12

func ProcessBLSToExecutionChanges(
st state.BeaconState,
signed interfaces.SignedBeaconBlock) (state.BeaconState, error) {
if signed.Version() < version.Capella {
return st, nil
}
changes, err := signed.Block().Body().BLSToExecutionChanges()
if err != nil {
return nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
}
// Return early if no changes
if len(changes) == 0 {
return st, nil
}
for _, change := range changes {
st, err = processBLSToExecutionChange(st, change)
if err != nil {
return nil, errors.Wrap(err, "could not process BLSToExecutionChange")
}
}
return st, nil
}

// processBLSToExecutionChange validates a SignedBLSToExecution message and
// ProcessBLSToExecutionChange validates a SignedBLSToExecution message and
// changes the validator's withdrawal address accordingly.
//
// Spec pseudocode definition:
@@ -65,30 +39,13 @@ func ProcessBLSToExecutionChanges(
// + b'\x00' * 11
// + address_change.to_execution_address
// )
func processBLSToExecutionChange(st state.BeaconState, signed *ethpb.SignedBLSToExecutionChange) (state.BeaconState, error) {
// Checks that the message passes the validation conditions.
val, err := ValidateBLSToExecutionChange(st, signed)
if err != nil {
return nil, err
}

message := signed.Message
newCredentials := make([]byte, executionToBLSPadding)
newCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
val.WithdrawalCredentials = append(newCredentials, message.ToExecutionAddress...)
err = st.UpdateValidatorAtIndex(message.ValidatorIndex, val)
return st, err
}

// ValidateBLSToExecutionChange validates the execution change message against the state and returns the
// validator referenced by the message.
func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.SignedBLSToExecutionChange) (*ethpb.Validator, error) {
func ProcessBLSToExecutionChange(st state.BeaconState, signed *ethpb.SignedBLSToExecutionChange) (state.BeaconState, error) {
if signed == nil {
return nil, errNilSignedWithdrawalMessage
return st, errNilSignedWithdrawalMessage
}
message := signed.Message
if message == nil {
return nil, errNilWithdrawalMessage
return st, errNilWithdrawalMessage
}

val, err := st.ValidatorAtIndex(message.ValidatorIndex)
@@ -102,12 +59,26 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si

// hash the public key and verify it matches the withdrawal credentials
fromPubkey := message.FromBlsPubkey
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(fromPubkey)
if !bytes.Equal(digest[1:], cred[1:]) {
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(fromPubkey[:32]), bytesutil.ToBytes32(fromPubkey[32:])}
digest := make([][32]byte, 1)
htr.VectorizedSha256(pubkeyChunks, digest)
if !bytes.Equal(digest[0][1:], cred[1:]) {
return nil, errInvalidWithdrawalCredentials
}
return val, nil

epoch := slots.ToEpoch(st.Slot())
domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBLSToExecutionChange, st.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
if err := signing.VerifySigningRoot(message, fromPubkey, signed.Signature, domain); err != nil {
return nil, signing.ErrSigFailedToVerify
}
newCredentials := make([]byte, executionToBLSPadding)
newCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
val.WithdrawalCredentials = append(newCredentials, message.ToExecutionAddress...)
err = st.UpdateValidatorAtIndex(message.ValidatorIndex, val)
return st, err
}

func ProcessWithdrawals(st state.BeaconState, withdrawals []*enginev1.Withdrawal) (state.BeaconState, error) {
@@ -150,37 +121,3 @@ func ProcessWithdrawals(st state.BeaconState, withdrawals []*enginev1.Withdrawal
}
return st, nil
}

func BLSChangesSignatureBatch(
st state.ReadOnlyBeaconState,
changes []*ethpb.SignedBLSToExecutionChange,
) (*bls.SignatureBatch, error) {
// Return early if no changes
if len(changes) == 0 {
return bls.NewSet(), nil
}
batch := &bls.SignatureBatch{
Signatures: make([][]byte, len(changes)),
PublicKeys: make([]bls.PublicKey, len(changes)),
Messages: make([][32]byte, len(changes)),
}
epoch := slots.ToEpoch(st.Slot())
domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBLSToExecutionChange, st.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
for i, change := range changes {
batch.Signatures[i] = change.Signature
publicKey, err := bls.PublicKeyFromBytes(change.Message.FromBlsPubkey)
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to public key")
}
batch.PublicKeys[i] = publicKey
htr, err := signing.SigningData(change.Message.HashTreeRoot, domain)
if err != nil {
return nil, errors.Wrap(err, "could not compute BLSToExecutionChange signing data")
}
batch.Messages[i] = htr
}
return batch, nil
}

@@ -10,12 +10,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/bls/common"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/hash/htr"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
@@ -34,13 +32,14 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
|
||||
digest := hashFn.Hash(pubkey)
|
||||
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
|
||||
digest := make([][32]byte, 1)
|
||||
htr.VectorizedSha256(pubkeyChunks, digest)
|
||||
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
WithdrawalCredentials: digest[:],
|
||||
WithdrawalCredentials: digest[0][:],
|
||||
},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
@@ -69,47 +68,6 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
|
||||
|
||||
require.DeepEqual(t, message.ToExecutionAddress, val.WithdrawalCredentials[12:])
|
||||
})
|
||||
t.Run("happy case only validation", func(t *testing.T) {
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pubkey := priv.PublicKey().Marshal()
|
||||
|
||||
message := ðpb.BLSToExecutionChange{
|
||||
ToExecutionAddress: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13},
|
||||
ValidatorIndex: 0,
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
|
||||
digest := hashFn.Hash(pubkey)
|
||||
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
WithdrawalCredentials: digest[:],
|
||||
},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Validators: registry,
|
||||
Fork: ðpb.Fork{
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, priv)
|
||||
require.NoError(t, err)
|
||||
|
||||
signed := ðpb.SignedBLSToExecutionChange{
|
||||
Message: message,
|
||||
Signature: signature,
|
||||
}
|
||||
val, err := blocks.ValidateBLSToExecutionChange(st, signed)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, digest[:], val.WithdrawalCredentials)
|
||||
})
|
||||
|
||||
t.Run("non-existent validator", func(t *testing.T) {
|
||||
priv, err := bls.RandKey()
|
||||
@@ -122,13 +80,14 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
|
||||
digest := hashFn.Hash(pubkey)
|
||||
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
|
||||
digest := make([][32]byte, 1)
|
||||
htr.VectorizedSha256(pubkeyChunks, digest)
|
||||
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
WithdrawalCredentials: digest[:],
|
||||
WithdrawalCredentials: digest[0][:],
|
||||
},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
@@ -201,13 +160,15 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
|
||||
ValidatorIndex: 0,
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
|
||||
digest := hashFn.Hash(pubkey)
|
||||
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
|
||||
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
|
||||
digest := make([][32]byte, 1)
|
||||
htr.VectorizedSha256(pubkeyChunks, digest)
|
||||
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte

registry := []*ethpb.Validator{
{
WithdrawalCredentials: digest[:],
WithdrawalCredentials: digest[0][:],
},
}
registry[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
@@ -547,7 +508,7 @@ func TestProcessWithdrawals(t *testing.T) {
}

checkPostState := func(t *testing.T, expected control, st state.BeaconState) {
l, err := st.NextWithdrawalValidatorIndex()
l, err := st.LastWithdrawalValidatorIndex()
require.NoError(t, err)
require.Equal(t, expected.NextWithdrawalValidatorIndex, l)

@@ -617,137 +578,3 @@ func TestProcessWithdrawals(t *testing.T) {
})
}
}

func TestProcessBLSToExecutionChanges(t *testing.T) {
spb := &ethpb.BeaconStateCapella{
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
}
numValidators := 10
validators := make([]*ethpb.Validator, numValidators)
blsChanges := make([]*ethpb.BLSToExecutionChange, numValidators)
spb.Balances = make([]uint64, numValidators)
privKeys := make([]common.SecretKey, numValidators)
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}

for i := range validators {
v := &ethpb.Validator{}
v.EffectiveBalance = maxEffectiveBalance
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
v.WithdrawalCredentials = make([]byte, 32)
priv, err := bls.RandKey()
require.NoError(t, err)
privKeys[i] = priv
pubkey := priv.PublicKey().Marshal()

message := &ethpb.BLSToExecutionChange{
ToExecutionAddress: executionAddress,
ValidatorIndex: types.ValidatorIndex(i),
FromBlsPubkey: pubkey,
}

hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
copy(v.WithdrawalCredentials, digest[:])
validators[i] = v
blsChanges[i] = message
}
spb.Validators = validators
st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)

signedChanges := make([]*ethpb.SignedBLSToExecutionChange, numValidators)
for i, message := range blsChanges {
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
require.NoError(t, err)

signed := &ethpb.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}
signedChanges[i] = signed
}

body := &ethpb.BeaconBlockBodyCapella{
BlsToExecutionChanges: signedChanges,
}
bpb := &ethpb.BeaconBlockCapella{
Body: body,
}
sbpb := &ethpb.SignedBeaconBlockCapella{
Block: bpb,
}
signed, err := consensusblocks.NewSignedBeaconBlock(sbpb)
require.NoError(t, err)
st, err = blocks.ProcessBLSToExecutionChanges(st, signed)
require.NoError(t, err)
vals := st.Validators()
for _, val := range vals {
require.DeepEqual(t, executionAddress, val.WithdrawalCredentials[12:])
require.Equal(t, params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, val.WithdrawalCredentials[0])
}
}

func TestBLSChangesSignatureBatch(t *testing.T) {
spb := &ethpb.BeaconStateCapella{
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
}
numValidators := 10
validators := make([]*ethpb.Validator, numValidators)
blsChanges := make([]*ethpb.BLSToExecutionChange, numValidators)
spb.Balances = make([]uint64, numValidators)
privKeys := make([]common.SecretKey, numValidators)
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}

for i := range validators {
v := &ethpb.Validator{}
v.EffectiveBalance = maxEffectiveBalance
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
v.WithdrawalCredentials = make([]byte, 32)
priv, err := bls.RandKey()
require.NoError(t, err)
privKeys[i] = priv
pubkey := priv.PublicKey().Marshal()

message := &ethpb.BLSToExecutionChange{
ToExecutionAddress: executionAddress,
ValidatorIndex: types.ValidatorIndex(i),
FromBlsPubkey: pubkey,
}

hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
copy(v.WithdrawalCredentials, digest[:])
validators[i] = v
blsChanges[i] = message
}
spb.Validators = validators
st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)

signedChanges := make([]*ethpb.SignedBLSToExecutionChange, numValidators)
for i, message := range blsChanges {
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
require.NoError(t, err)

signed := &ethpb.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}
signedChanges[i] = signed
}
batch, err := blocks.BLSChangesSignatureBatch(st, signedChanges)
require.NoError(t, err)
verify, err := batch.Verify()
require.NoError(t, err)
require.Equal(t, true, verify)
}
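
Illustrative sketch (not part of the diff above): the batching idea the test exercises with blocks.BLSChangesSignatureBatch and batch.Verify. Every SignedBLSToExecutionChange contributes one (public key, signing root, signature) entry, the entries are collected into a single set, and the whole set is checked with one Verify call instead of one verification per change. All type and function names below are hypothetical stand-ins, not the real Prysm or BLS APIs.

package main

import "fmt"

// triple is a hypothetical stand-in for one entry of a signature batch.
type triple struct {
	pubKey      []byte
	signingRoot [32]byte
	signature   []byte
}

// SignatureBatch collects triples so they can be verified together.
type SignatureBatch struct{ items []triple }

// Join merges another batch into this one, mirroring the set.Join calls in the diff.
func (b *SignatureBatch) Join(other *SignatureBatch) *SignatureBatch {
	b.items = append(b.items, other.items...)
	return b
}

// Verify checks every triple with the supplied verifier; a real implementation
// would hand the whole set to a batched BLS verification instead of looping.
func (b *SignatureBatch) Verify(verifyOne func(triple) bool) bool {
	for _, it := range b.items {
		if !verifyOne(it) {
			return false
		}
	}
	return true
}

func main() {
	batch := &SignatureBatch{items: []triple{{pubKey: []byte{0x01}}}}
	ok := batch.Verify(func(triple) bool { return true }) // placeholder verifier
	fmt.Println("batch valid:", ok)
}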

@@ -15,8 +15,7 @@ import (

func TestUpgradeToCapella(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().MaxValidatorsPerCommittee)
preForkState, err := st.Copy()
require.NoError(t, err)
preForkState := st.Copy()
mSt, err := capella.UpgradeToCapella(st)
require.NoError(t, err)

@@ -96,7 +95,7 @@ func TestUpgradeToCapella(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(0), nwi)

lwvi, err := mSt.NextWithdrawalValidatorIndex()
lwvi, err := mSt.LastWithdrawalValidatorIndex()
require.NoError(t, err)
require.Equal(t, types.ValidatorIndex(0), lwvi)
}

@@ -84,7 +84,7 @@ func processJustificationBits(state state.BeaconState, totalActiveBalance, prevE
return newBits
}

// weighJustificationAndFinalization processes justification and finalization during
// updateJustificationAndFinalization processes justification and finalization during
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
func weighJustificationAndFinalization(state state.BeaconState, newBits bitfield.Bitvector4) (state.BeaconState, error) {
jc, fc, err := computeCheckpoints(state, newBits)

@@ -14,8 +14,7 @@ import (

func TestUpgradeToBellatrix(t *testing.T) {
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
preForkState, err := st.Copy()
require.NoError(t, err)
preForkState := st.Copy()
mSt, err := execution.UpgradeToBellatrix(st)
require.NoError(t, err)

@@ -177,14 +177,6 @@ func CommitteeAssignments(
if err != nil {
return nil, nil, err
}
minValidStartSlot := types.Slot(0)
if state.Slot() >= params.BeaconConfig().SlotsPerHistoricalRoot {
minValidStartSlot = state.Slot() - params.BeaconConfig().SlotsPerHistoricalRoot
}
if startSlot < minValidStartSlot {
return nil, nil, fmt.Errorf("start slot %d is smaller than the minimum valid start slot %d", startSlot, minValidStartSlot)
}

proposerIndexToSlots := make(map[types.ValidatorIndex][]types.Slot, params.BeaconConfig().SlotsPerEpoch)
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Skip proposer assignment for genesis slot.

@@ -235,25 +235,6 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
}

func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}

state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: validators,
Slot: params.BeaconConfig().SlotsPerHistoricalRoot + 1,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, _, err = CommitteeAssignments(context.Background(), state, 0)
require.ErrorContains(t, "start slot 0 is smaller than the minimum valid start slot 1", err)
}

func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)

@@ -47,12 +47,12 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch types.Epoch, obj f
// domain=domain,
// ))
func ComputeSigningRoot(object fssz.HashRoot, domain []byte) ([32]byte, error) {
return SigningData(object.HashTreeRoot, domain)
return signingData(object.HashTreeRoot, domain)
}

// Computes the signing data by utilising the provided root function and then
// returning the signing data of the container object.
func SigningData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
func signingData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
objRoot, err := rootFunc()
if err != nil {
return [32]byte{}, err
@@ -107,7 +107,7 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signatur
if err != nil {
return errors.Wrap(err, "could not convert bytes to signature")
}
root, err := SigningData(blkHdr.HashTreeRoot, domain)
root, err := signingData(blkHdr.HashTreeRoot, domain)
if err != nil {
return errors.Wrap(err, "could not compute signing root")
}
@@ -146,7 +146,7 @@ func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byt
return nil, errors.Wrap(err, "could not convert bytes to public key")
}
// utilize custom block hashing function
root, err := SigningData(rootFunc, domain)
root, err := signingData(rootFunc, domain)
if err != nil {
return nil, errors.Wrap(err, "could not compute signing root")
}
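
Illustrative sketch (not part of the diff): what the signingData helper shown above computes. The signing root is the hash tree root of an SSZ container holding the object root and a 32-byte domain; for two 32-byte leaves that merkleization reduces to a single SHA-256 over their concatenation. The function and variable names here are illustrative only; just the shape mirrors signingData(rootFunc, domain).

package main

import (
	"crypto/sha256"
	"fmt"
)

// signingRootSketch obtains the object root first, then merkleizes it together
// with the domain, which for two 32-byte chunks is one SHA-256 call.
func signingRootSketch(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
	objRoot, err := rootFunc()
	if err != nil {
		return [32]byte{}, err
	}
	return sha256.Sum256(append(objRoot[:], domain...)), nil
}

func main() {
	objectRoot := sha256.Sum256([]byte("example object")) // stand-in for HashTreeRoot
	domain := make([]byte, 32)                            // zero domain, illustration only
	root, err := signingRootSketch(func() ([32]byte, error) { return objectRoot, nil }, domain)
	if err != nil {
		panic(err)
	}
	fmt.Printf("signing root: %x\n", root)
}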

@@ -50,9 +50,7 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))

c, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), c, beaconState.Slot()+1)
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -98,8 +96,7 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]

c, err = beaconState.Copy()
require.NoError(t, err)
c := beaconState.Copy()
sig, err := util.BlockSignatureAltair(c, block.Block, privKeys)
require.NoError(t, err)
block.Signature = sig.Marshal()
@@ -140,9 +137,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))

c, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), c, beaconState.Slot()+1)
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -189,8 +184,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]

c, err = beaconState.Copy()
require.NoError(t, err)
c := beaconState.Copy()
sig, err := util.BlockSignatureAltair(c, block.Block, privKeys)
require.NoError(t, err)
block.Signature = sig.Marshal()
@@ -241,8 +235,7 @@ func createFullAltairBlockWithOperations(t *testing.T) (state.BeaconState,
sCom, err := altair.NextSyncCommittee(context.Background(), beaconState)
assert.NoError(t, err)
assert.NoError(t, beaconState.SetCurrentSyncCommittee(sCom))
tState, err := beaconState.Copy()
assert.NoError(t, err)
tState := beaconState.Copy()
blk, err := util.GenerateFullBlockAltair(tState, privKeys,
&util.BlockGenConfig{NumAttestations: 1, NumVoluntaryExits: 0, NumDeposits: 0}, 1)
require.NoError(t, err)

@@ -52,9 +52,7 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))

copied, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), copied, beaconState.Slot()+1)
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -100,8 +98,7 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]

c, err := beaconState.Copy()
require.NoError(t, err)
c := beaconState.Copy()
sig, err := util.BlockSignature(c, block.Block, privKeys)
require.NoError(t, err)
block.Signature = sig.Marshal()
@@ -142,9 +139,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))

copied, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), copied, beaconState.Slot()+1)
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -191,8 +186,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]

c, err := beaconState.Copy()
require.NoError(t, err)
c := beaconState.Copy()
sig, err := util.BlockSignature(c, block.Block, privKeys)
require.NoError(t, err)
block.Signature = sig.Marshal()

@@ -25,8 +25,7 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
defer undo()
beaconState, err := benchmark.PreGenState1Epoch()
require.NoError(b, err)
cleanStates, err := clonedStates(beaconState)
require.NoError(b, err)
cleanStates := clonedStates(beaconState)
block, err := benchmark.PreGenFullBlock()
require.NoError(b, err)

@@ -46,8 +45,7 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {

beaconState, err := benchmark.PreGenState1Epoch()
require.NoError(b, err)
cleanStates, err := clonedStates(beaconState)
require.NoError(b, err)
cleanStates := clonedStates(beaconState)
block, err := benchmark.PreGenFullBlock()
require.NoError(b, err)

@@ -90,9 +88,7 @@ func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) {
for i := 0; i < b.N; i++ {
// ProcessEpochPrecompute is the optimized version of process epoch. It's enabled by default
// at run time.
copied, err := beaconState.Copy()
require.NoError(b, err)
_, err = coreState.ProcessEpochPrecompute(context.Background(), copied)
_, err := coreState.ProcessEpochPrecompute(context.Background(), beaconState.Copy())
require.NoError(b, err)
}
}
@@ -128,9 +124,7 @@ func BenchmarkHashTreeRootState_FullState(b *testing.B) {
func BenchmarkMarshalState_FullState(b *testing.B) {
beaconState, err := benchmark.PreGenstateFullEpochs()
require.NoError(b, err)
pb, err := beaconState.ToProtoUnsafe()
require.NoError(b, err)
natState, err := state_native.ProtobufBeaconStatePhase0(pb)
natState, err := state_native.ProtobufBeaconStatePhase0(beaconState.ToProtoUnsafe())
require.NoError(b, err)
b.Run("Proto_Marshal", func(b *testing.B) {
b.ResetTimer()
@@ -154,9 +148,7 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
func BenchmarkUnmarshalState_FullState(b *testing.B) {
beaconState, err := benchmark.PreGenstateFullEpochs()
require.NoError(b, err)
pb, err := beaconState.ToProtoUnsafe()
require.NoError(b, err)
natState, err := state_native.ProtobufBeaconStatePhase0(pb)
natState, err := state_native.ProtobufBeaconStatePhase0(beaconState.ToProtoUnsafe())
require.NoError(b, err)
protoObject, err := proto.Marshal(natState)
require.NoError(b, err)
@@ -181,14 +173,10 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
})
}

func clonedStates(beaconState state.BeaconState) ([]state.BeaconState, error) {
func clonedStates(beaconState state.BeaconState) []state.BeaconState {
clonedStates := make([]state.BeaconState, runAmount)
var err error
for i := 0; i < runAmount; i++ {
clonedStates[i], err = beaconState.Copy()
if err != nil {
return nil, err
}
clonedStates[i] = beaconState.Copy()
}
return clonedStates, nil
return clonedStates
}
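
Illustrative sketch (not part of the diff): the pre-cloning pattern behind clonedStates, using a hypothetical State type rather than the real BeaconState. The point of the change above is that Copy no longer returns an error, so each benchmark run can take its own clone without per-clone error handling, and mutating one clone never affects the next run's input.

package main

import "fmt"

// State is a hypothetical stand-in for a beacon state.
type State struct{ slot uint64 }

// Copy returns an independent copy; in the branch shown above, Copy has no
// error return, which is what removes the require.NoError calls in the tests.
func (s *State) Copy() *State { c := *s; return &c }

// clonedStates pre-builds one independent copy per benchmark iteration.
func clonedStates(base *State, n int) []*State {
	out := make([]*State, n)
	for i := range out {
		out[i] = base.Copy()
	}
	return out
}

func main() {
	base := &State{slot: 1}
	runs := clonedStates(base, 3)
	runs[0].slot = 99 // mutating one clone leaves the base and the other clones untouched
	fmt.Println(base.slot, runs[1].slot, runs[2].slot)
}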

@@ -20,9 +20,7 @@ func TestSkipSlotCache_OK(t *testing.T) {
transition.SkipSlotCache.Enable()
defer transition.SkipSlotCache.Disable()
bState, privs := util.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount)
bStateProto, err := bState.ToProto()
require.NoError(t, err)
pbState, err := state_native.ProtobufBeaconStatePhase0(bStateProto)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProto())
require.NoError(t, err)
originalState, err := state_native.InitializeFromProtoPhase0(pbState)
require.NoError(t, err)
@@ -44,18 +42,12 @@ func TestSkipSlotCache_OK(t *testing.T) {
bState, err = transition.ExecuteStateTransition(context.Background(), bState, wsb)
require.NoError(t, err, "Could not process state transition")

originalStateProto, err := originalState.ToProto()
require.NoError(t, err)
bStateProto, err = bState.ToProto()
require.NoError(t, err)
assert.DeepEqual(t, originalStateProto, bStateProto, "Skipped slots cache leads to different states")
assert.DeepEqual(t, originalState.ToProto(), bState.ToProto(), "Skipped slots cache leads to different states")
}

func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
bState, privs := util.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount)
bStateProto, err := bState.ToProto()
require.NoError(t, err)
pbState, err := state_native.ProtobufBeaconStatePhase0(bStateProto)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProto())
require.NoError(t, err)
originalState, err := state_native.InitializeFromProtoPhase0(pbState)
require.NoError(t, err)
@@ -78,9 +70,7 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
// Create two shallow but different forks
var s1, s0 state.BeaconState
{
c0, err := originalState.Copy()
require.NoError(t, err)
blk, err := util.GenerateFullBlock(c0, privs, blkCfg, originalState.Slot()+10)
blk, err := util.GenerateFullBlock(originalState.Copy(), privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
copy(blk.Block.Body.Graffiti, "block 1")
signature, err := util.BlockSignature(originalState, blk.Block, privs)
@@ -88,14 +78,12 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
blk.Signature = signature.Marshal()
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s1, err = transition.ExecuteStateTransition(context.Background(), c0, wsb)
s1, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wsb)
require.NoError(t, err, "Could not run state transition")
}

{
c1, err := originalState.Copy()
require.NoError(t, err)
blk, err := util.GenerateFullBlock(c1, privs, blkCfg, originalState.Slot()+10)
blk, err := util.GenerateFullBlock(originalState.Copy(), privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
copy(blk.Block.Body.Graffiti, "block 2")
signature, err := util.BlockSignature(originalState, blk.Block, privs)
@@ -103,7 +91,7 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
blk.Signature = signature.Marshal()
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s0, err = transition.ExecuteStateTransition(context.Background(), c1, wsb)
s0, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wsb)
require.NoError(t, err, "Could not run state transition")
}

@@ -128,38 +116,28 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
} else {
st = s0
}
c, err := st.Copy()
require.NoError(t, err)
setups = append(setups, c)
setups = append(setups, st.Copy())
}

problemSlot := s1.Slot() + 2
s1Copied, err := s1.Copy()
require.NoError(t, err)
expected1, err := transition.ProcessSlots(context.Background(), s1Copied, problemSlot)
expected1, err := transition.ProcessSlots(context.Background(), s1.Copy(), problemSlot)
require.NoError(t, err)
expectedRoot1, err := expected1.HashTreeRoot(context.Background())
require.NoError(t, err)
t.Logf("chain 1 (even i) expected root %x at slot %d", expectedRoot1[:], problemSlot)

expectedS1Copied, err := expected1.Copy()
require.NoError(t, err)
tmp1, err := transition.ProcessSlots(context.Background(), expectedS1Copied, problemSlot+1)
tmp1, err := transition.ProcessSlots(context.Background(), expected1.Copy(), problemSlot+1)
require.NoError(t, err)
gotRoot := tmp1.StateRoots()[problemSlot]
require.DeepEqual(t, expectedRoot1[:], gotRoot, "State roots for chain 1 are bad, expected root doesn't match")

s0Copied, err := s0.Copy()
require.NoError(t, err)
expected2, err := transition.ProcessSlots(context.Background(), s0Copied, problemSlot)
expected2, err := transition.ProcessSlots(context.Background(), s0.Copy(), problemSlot)
require.NoError(t, err)
expectedRoot2, err := expected2.HashTreeRoot(context.Background())
require.NoError(t, err)
t.Logf("chain 2 (odd i) expected root %x at slot %d", expectedRoot2[:], problemSlot)

expectedS2Copied, err := expected2.Copy()
require.NoError(t, err)
tmp2, err := transition.ProcessSlots(context.Background(), expectedS2Copied, problemSlot+1)
tmp2, err := transition.ProcessSlots(context.Background(), expected2.Copy(), problemSlot+1)
require.NoError(t, err)
gotRoot = tmp2.StateRoots()[problemSlot]
require.DeepEqual(t, expectedRoot2[:], gotRoot, "State roots for chain 2 are bad, expected root doesn't match")

@@ -98,13 +98,9 @@ func TestGenesisState_HashEquality(t *testing.T) {
state, err := transition.GenesisBeaconState(context.Background(), deposits, 0, &ethpb.Eth1Data{BlockHash: make([]byte, 32)})
require.NoError(t, err)

state1Proto, err := state1.ToProto()
pbState1, err := state_native.ProtobufBeaconStatePhase0(state1.ToProto())
require.NoError(t, err)
stateProto, err := state.ToProto()
require.NoError(t, err)
pbState1, err := state_native.ProtobufBeaconStatePhase0(state1Proto)
require.NoError(t, err)
pbstate, err := state_native.ProtobufBeaconStatePhase0(stateProto)
pbstate, err := state_native.ProtobufBeaconStatePhase0(state.ToProto())
require.NoError(t, err)

root1, err1 := hash.HashProto(pbState1)

@@ -41,7 +41,6 @@ func NextSlotState(_ context.Context, root []byte) (state.BeaconState, error) {
}
nextSlotCacheHit.Inc()
// Returning copied state.
return nsc.state.Copy()
return nsc.state.Copy(), nil
}

// UpdateNextSlotCache updates the `nextSlotCache`. It saves the input state after advancing the state slot by 1
@@ -49,12 +49,8 @@ func NextSlotState(_ context.Context, root []byte) (state.BeaconState, error) {
// This is useful to call after successfully processing a block.
func UpdateNextSlotCache(ctx context.Context, root []byte, state state.BeaconState) error {
// Advancing one slot by using a copied state.
copied, err := state.Copy()
if err != nil {
return err

}
copied, err = ProcessSlots(ctx, copied, copied.Slot()+1)
copied := state.Copy()
copied, err := ProcessSlots(ctx, copied, copied.Slot()+1)
if err != nil {
return err
}

@@ -239,7 +239,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
tracing.AnnotateError(span, ctx.Err())
// Cache last best value.
if highestSlot < state.Slot() {
if err := SkipSlotCache.Put(ctx, key, state); err != nil {
if SkipSlotCache.Put(ctx, key, state); err != nil {
log.WithError(err).Error("Failed to put skip slot cache value")
}
}
@@ -299,10 +299,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
}

if highestSlot < state.Slot() {
err := SkipSlotCache.Put(ctx, key, state)
if err != nil {
return nil, err
}
SkipSlotCache.Put(ctx, key, state)
}

return state, nil

@@ -127,12 +127,10 @@ func CalculateStateRoot(
}

// Copy state to avoid mutating the state reference.
state, err := state.Copy()
if err != nil {
return [32]byte{}, err
}
state = state.Copy()

// Execute per slots transition.
var err error
parentRoot := signed.Block().ParentRoot()
state, err = ProcessSlotsUsingNextSlotCache(ctx, state, parentRoot[:], signed.Block().Slot())
if err != nil {
@@ -202,17 +200,6 @@ func ProcessBlockNoVerifyAnySig(
set := bls.NewSet()
set.Join(bSet).Join(rSet).Join(aSet)

if blk.Version() >= version.Capella {
changes, err := signed.Block().Body().BLSToExecutionChanges()
if err != nil {
return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
}
cSet, err := b.BLSChangesSignatureBatch(st, changes)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
}
set.Join(cSet)
}
return set, st, nil
}

@@ -381,11 +368,7 @@ func altairOperations(
if _, err := altair.ProcessDeposits(ctx, st, signedBeaconBlock.Block().Body().Deposits()); err != nil {
return nil, errors.Wrap(err, "could not process altair deposit")
}
st, err = b.ProcessVoluntaryExits(ctx, st, signedBeaconBlock.Block().Body().VoluntaryExits())
if err != nil {
return nil, errors.Wrap(err, "could not process voluntary exits")
}
return b.ProcessBLSToExecutionChanges(st, signedBeaconBlock)
return b.ProcessVoluntaryExits(ctx, st, signedBeaconBlock.Block().Body().VoluntaryExits())
}

// This calls phase 0 block operations.

@@ -39,9 +39,7 @@ func TestExecuteStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))

copied, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), copied, beaconState.Slot()+1)
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -97,9 +95,7 @@ func TestExecuteStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *test
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))

copied, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), copied, beaconState.Slot()+1)
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)

@@ -76,9 +76,7 @@ func TestExecuteStateTransition_FullProcess(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))

copied, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), copied, beaconState.Slot()+1)
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -341,8 +339,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
require.NoError(t, beaconState.SetLatestBlockHeader(header))
parentRoot, err := beaconState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
copied, err := beaconState.Copy()
require.NoError(t, err)
copied := beaconState.Copy()
require.NoError(t, copied.SetSlot(beaconState.Slot()+1))
randaoReveal, err := util.RandaoReveal(copied, currentEpoch, privKeys)
require.NoError(t, err)

@@ -328,8 +328,8 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.SignedBeaconBl
func (s *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
defer span.End()
hasStateSummary := s.HasStateSummary(ctx, blockRoot)
return s.db.Update(func(tx *bolt.Tx) error {
hasStateSummary := s.hasStateSummaryBytes(tx, blockRoot)
hasStateInDB := tx.Bucket(stateBucket).Get(blockRoot[:]) != nil
if !(hasStateInDB || hasStateSummary) {
return errors.New("no state or state summary found with head block root")

@@ -59,9 +59,9 @@ func (s *Store) SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.C
if err != nil {
return err
}
hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateSummary := s.hasStateSummaryBytes(tx, bytesutil.ToBytes32(checkpoint.Root))
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummary) {
log.Warnf("Recovering state summary for justified root: %#x", bytesutil.Trunc(checkpoint.Root))
@@ -82,9 +82,9 @@ func (s *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.C
if err != nil {
return err
}
hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateSummary := s.hasStateSummaryBytes(tx, bytesutil.ToBytes32(checkpoint.Root))
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummary) {
log.Warnf("Recovering state summary for finalized root: %#x", bytesutil.Trunc(checkpoint.Root))

@@ -92,9 +92,7 @@ func Test_migrateStateValidators(t *testing.T) {
assert.NoError(t, hashErr)
individualHashes = append(individualHashes, hash[:])
}
s, err := st.ToProtoUnsafe()
assert.NoError(t, err)
pbState, err := state_native.ProtobufBeaconStatePhase0(s)
pbState, err := state_native.ProtobufBeaconStatePhase0(st.ToProtoUnsafe())
assert.NoError(t, err)
validatorsFoundCount := 0
for _, val := range pbState.Validators {
@@ -140,11 +138,7 @@ func Test_migrateStateValidators(t *testing.T) {
blockRoot := [32]byte{'A'}
rcvdState, err := dbStore.State(context.Background(), blockRoot)
assert.NoError(t, err)
s1, err := rcvdState.ToProtoUnsafe()
assert.NoError(t, err)
s2, err := state.ToProtoUnsafe()
assert.NoError(t, err)
require.DeepSSZEqual(t, s1, s2, "saved state with validators and retrieved state are not matching")
require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")

// find hashes of the validators that are set as part of the state
var hashes []byte
@@ -157,9 +151,7 @@ func Test_migrateStateValidators(t *testing.T) {
}

// check if all the validators that were in the state, are stored properly in the validator bucket
s3, err := rcvdState.ToProtoUnsafe()
assert.NoError(t, err)
pbState, err := state_native.ProtobufBeaconStatePhase0(s3)
pbState, err := state_native.ProtobufBeaconStatePhase0(rcvdState.ToProtoUnsafe())
assert.NoError(t, err)
validatorsFoundCount := 0
for _, val := range pbState.Validators {
@@ -249,11 +241,7 @@ func Test_migrateAltairStateValidators(t *testing.T) {
blockRoot := [32]byte{'A'}
rcvdState, err := dbStore.State(context.Background(), blockRoot)
assert.NoError(t, err)
s1, err := rcvdState.ToProtoUnsafe()
assert.NoError(t, err)
s2, err := state.ToProtoUnsafe()
assert.NoError(t, err)
require.DeepSSZEqual(t, s1, s2, "saved state with validators and retrieved state are not matching")
require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")

// find hashes of the validators that are set as part of the state
var hashes []byte
@@ -266,9 +254,7 @@ func Test_migrateAltairStateValidators(t *testing.T) {
}

// check if all the validators that were in the state, are stored properly in the validator bucket
s3, err := rcvdState.ToProtoUnsafe()
assert.NoError(t, err)
pbState, err := state_native.ProtobufBeaconStateAltair(s3)
pbState, err := state_native.ProtobufBeaconStateAltair(rcvdState.ToProtoUnsafe())
assert.NoError(t, err)
validatorsFoundCount := 0
for _, val := range pbState.Validators {

@@ -189,11 +189,7 @@ func getValidators(states []state.ReadOnlyBeaconState) ([][]byte, map[string]*et
validatorsEntries := make(map[string]*ethpb.Validator) // It's a map to make sure that you store only new validator entries.
validatorKeys := make([][]byte, len(states)) // For every state, this stores a compressed list of validator keys.
for i, st := range states {
p, err := st.ToProtoUnsafe()
if err != nil {
return nil, nil, err
}
pb, ok := p.(withValidators)
pb, ok := st.ToProtoUnsafe().(withValidators)
if !ok {
return nil, nil, errors.New("could not cast state to interface with GetValidators()")
}
@@ -232,11 +228,7 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
// validator entries.To bring the gap closer, we empty the validators
// just before Put() and repopulate that state with original validators.
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
p, err := states[i].ToProtoUnsafe()
if err != nil {
return err
}
switch rawType := p.(type) {
switch rawType := states[i].ToProtoUnsafe().(type) {
case *ethpb.BeaconState:
pbState, err := statenative.ProtobufBeaconStatePhase0(rawType)
if err != nil {
@@ -542,19 +534,15 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [

// marshal versioned state from struct type down to bytes.
func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, error) {
p, err := st.ToProtoUnsafe()
if err != nil {
return nil, err
}
switch p.(type) {
switch st.ToProtoUnsafe().(type) {
case *ethpb.BeaconState:
rState, ok := p.(*ethpb.BeaconState)
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconState)
if !ok {
return nil, errors.New("non valid inner state")
}
return encode(ctx, rState)
case *ethpb.BeaconStateAltair:
rState, ok := p.(*ethpb.BeaconStateAltair)
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateAltair)
if !ok {
return nil, errors.New("non valid inner state")
}
@@ -567,7 +555,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
}
return snappy.Encode(nil, append(altairKey, rawObj...)), nil
case *ethpb.BeaconStateBellatrix:
rState, ok := p.(*ethpb.BeaconStateBellatrix)
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateBellatrix)
if !ok {
return nil, errors.New("non valid inner state")
}
@@ -580,7 +568,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
}
return snappy.Encode(nil, append(bellatrixKey, rawObj...)), nil
case *ethpb.BeaconStateCapella:
rState, ok := p.(*ethpb.BeaconStateCapella)
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateCapella)
if !ok {
return nil, errors.New("non valid inner state")
}

@@ -67,14 +67,9 @@ func (s *Store) HasStateSummary(ctx context.Context, blockRoot [32]byte) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasStateSummary")
defer span.End()

if s.stateSummaryCache.has(blockRoot) {
return true
}

var hasSummary bool
if err := s.db.View(func(tx *bolt.Tx) error {
enc := tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
hasSummary = len(enc) > 0
hasSummary = s.hasStateSummaryBytes(tx, blockRoot)
return nil
}); err != nil {
return false
@@ -82,6 +77,14 @@ func (s *Store) HasStateSummary(ctx context.Context, blockRoot [32]byte) bool {
return hasSummary
}

func (s *Store) hasStateSummaryBytes(tx *bolt.Tx, blockRoot [32]byte) bool {
if s.stateSummaryCache.has(blockRoot) {
return true
}
enc := tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
return len(enc) > 0
}
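
Illustrative sketch (not part of the diff): the single-transaction presence check that hasStateSummaryBytes performs, written as a standalone program against the real bbolt API. Callers already inside db.Update or db.View pass their transaction in, so no nested transaction is opened. The bucket name, key, and file path here are hypothetical.

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

var stateSummaryBucket = []byte("state-summaries")

// hasStateSummaryBytesSketch reports whether a summary exists for blockRoot
// using the caller's transaction instead of opening a new one.
func hasStateSummaryBytesSketch(tx *bolt.Tx, blockRoot [32]byte) bool {
	b := tx.Bucket(stateSummaryBucket)
	if b == nil {
		return false
	}
	return len(b.Get(blockRoot[:])) > 0
}

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	root := [32]byte{'A'}
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(stateSummaryBucket)
		if err != nil {
			return err
		}
		return b.Put(root[:], []byte{0x01})
	}); err != nil {
		log.Fatal(err)
	}

	_ = db.View(func(tx *bolt.Tx) error {
		fmt.Println("has summary:", hasStateSummaryBytesSketch(tx, root))
		return nil
	})
}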

// This saves all cached state summary objects to DB, and clears up the cache.
func (s *Store) saveCachedStateSummariesDB(ctx context.Context) error {
summaries := s.stateSummaryCache.getAll()

@@ -44,11 +44,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)

s1, err := st.ToProtoUnsafe()
require.NoError(t, err)
s2, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s1, s2, "saved state and retrieved state are not matching")
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state and retrieved state are not matching")

savedS, err = db.State(context.Background(), [32]byte{'B'})
require.NoError(t, err)
@@ -81,11 +77,7 @@ func TestState_CanSaveRetrieveValidatorEntries(t *testing.T) {
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)

s1, err := st.ToProtoUnsafe()
require.NoError(t, err)
s2, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s1, s2, "saved state with validators and retrieved state are not matching")
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")

// check if the index of the second state is still present.
err = db.db.Update(func(tx *bolt.Tx) error {
@@ -137,11 +129,7 @@ func TestStateAltair_CanSaveRetrieveValidatorEntries(t *testing.T) {
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)

s1, err := st.ToProtoUnsafe()
require.NoError(t, err)
s2, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s1, s2, "saved state with validators and retrieved state are not matching")
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")

// check if the index of the second state is still present.
err = db.db.Update(func(tx *bolt.Tx) error {
@@ -251,11 +239,7 @@ func TestState_CanSaveRetrieveValidatorEntriesWithoutCache(t *testing.T) {
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)

s1, err := st.ToProtoUnsafe()
require.NoError(t, err)
s2, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s1, s2, "saved state with validators and retrieved state are not matching")
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")

// check if the index of the second state is still present.
err = db.db.Update(func(tx *bolt.Tx) error {
@@ -376,11 +360,7 @@ func TestGenesisState_CanSaveRetrieve(t *testing.T) {

savedGenesisS, err := db.GenesisState(context.Background())
require.NoError(t, err)
s1, err := st.ToProtoUnsafe()
require.NoError(t, err)
s2, err := savedGenesisS.ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, s1, s2, "Did not retrieve saved state")
assert.DeepSSZEqual(t, st.ToProtoUnsafe(), savedGenesisS.ToProtoUnsafe(), "Did not retrieve saved state")
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), [32]byte{'C'}))
}

@@ -501,8 +481,7 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
s0, err := st.ToProtoUnsafe()
require.NoError(t, err)
s0 := st.ToProtoUnsafe()
require.NoError(t, db.SaveState(context.Background(), st, r))

b.Block.Slot = 100
@@ -514,8 +493,7 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
st, err = util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(100))
s1, err := st.ToProtoUnsafe()
require.NoError(t, err)
s1 := st.ToProtoUnsafe()
require.NoError(t, db.SaveState(context.Background(), st, r1))

b.Block.Slot = 1000
@@ -527,27 +505,21 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
st, err = util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1000))
s2, err := st.ToProtoUnsafe()
require.NoError(t, err)
s2 := st.ToProtoUnsafe()

require.NoError(t, db.SaveState(context.Background(), st, r2))

highest, err := db.HighestSlotStatesBelow(context.Background(), 2)
require.NoError(t, err)
want, err := highest[0].ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, want, s0)
assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), s0)

highest, err = db.HighestSlotStatesBelow(context.Background(), 101)
require.NoError(t, err)
want, err = highest[0].ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, want, s1)
assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), s1)

highest, err = db.HighestSlotStatesBelow(context.Background(), 1001)
require.NoError(t, err)
want, err = highest[0].ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, want, s2)
assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), s2)
}

func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) {
@@ -574,24 +546,14 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) {

highest, err := db.HighestSlotStatesBelow(context.Background(), 2)
require.NoError(t, err)
want, err := highest[0].ToProtoUnsafe()
require.NoError(t, err)
gotSt, err := st.ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, want, gotSt)
assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), st.ToProtoUnsafe())

highest, err = db.HighestSlotStatesBelow(context.Background(), 1)
require.NoError(t, err)
gs, err := genesisState.ToProtoUnsafe()
require.NoError(t, err)
want, err = highest[0].ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, want, gs)
assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), genesisState.ToProtoUnsafe())
highest, err = db.HighestSlotStatesBelow(context.Background(), 0)
require.NoError(t, err)
want, err = highest[0].ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, want, gs)
assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), genesisState.ToProtoUnsafe())
}

func TestStore_CleanUpDirtyStates_AboveThreshold(t *testing.T) {
@@ -718,11 +680,7 @@ func TestAltairState_CanSaveRetrieve(t *testing.T) {
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)

s0, err := st.ToProtoUnsafe()
require.NoError(t, err)
s1, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s0, s1)
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe())

savedS, err = db.State(context.Background(), [32]byte{'B'})
require.NoError(t, err)
@@ -872,11 +830,8 @@ func TestStateBellatrix_CanSaveRetrieveValidatorEntries(t *testing.T) {

savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
s0, err := st.ToProtoUnsafe()
require.NoError(t, err)
s1, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s0, s1, "saved state with validators and retrieved state are not matching")

require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")

// check if the index of the second state is still present.
err = db.db.Update(func(tx *bolt.Tx) error {
@@ -918,11 +873,8 @@ func TestBellatrixState_CanSaveRetrieve(t *testing.T) {

savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
s0, err := st.ToProtoUnsafe()
require.NoError(t, err)
s1, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s0, s1)

require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe())

savedS, err = db.State(context.Background(), [32]byte{'B'})
require.NoError(t, err)

@@ -38,9 +38,9 @@ func (s *Store) SaveLastValidatedCheckpoint(ctx context.Context, checkpoint *eth
if err != nil {
return err
}
hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateSummary := s.hasStateSummaryBytes(tx, bytesutil.ToBytes32(checkpoint.Root))
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummary) {
log.Warnf("Recovering state summary for last validated root: %#x", bytesutil.Trunc(checkpoint.Root))

@@ -51,26 +51,6 @@ func TestStore_LastValidatedCheckpoint_Recover(t *testing.T) {
assert.Equal(t, true, proto.Equal(cp, retrieved), "Wanted %v, received %v", cp, retrieved)
}

func BenchmarkStore_SaveLastValidatedCheckpoint(b *testing.B) {
db := setupDB(b)
ctx := context.Background()
root := bytesutil.ToBytes32([]byte{'A'})
cp := &ethpb.Checkpoint{
Epoch: 10,
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(b, err)
require.NoError(b, st.SetSlot(1))
require.NoError(b, db.SaveState(ctx, st, root))
db.stateSummaryCache.clear()

b.ResetTimer()
for i := 0; i < b.N; i++ {
require.NoError(b, db.SaveLastValidatedCheckpoint(ctx, cp))
}
}

func TestStore_LastValidatedCheckpoint_DefaultIsFinalized(t *testing.T) {
db := setupDB(t)
ctx := context.Background()

@@ -41,7 +41,6 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
"//container/trie:go_default_library",
"//contracts/deposit:go_default_library",
"//crypto/hash:go_default_library",
@@ -53,7 +52,6 @@ go_library(
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//:go_default_library",
@@ -110,7 +108,6 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//contracts/deposit:go_default_library",

@@ -19,10 +19,8 @@ import (
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v3/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -32,8 +30,6 @@ const (
NewPayloadMethod = "engine_newPayloadV1"
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
ForkchoiceUpdatedMethodV2 = "engine_forkchoiceUpdatedV2"
// GetPayloadMethod v1 request string for JSON-RPC.
GetPayloadMethod = "engine_getPayloadV1"
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
@@ -70,7 +66,7 @@ type ExecutionPayloadReconstructor interface {
type EngineCaller interface {
NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error)
ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
ctx context.Context, state *pb.ForkchoiceState, attrs *pb.PayloadAttributes,
) (*pb.PayloadIDBytes, []byte, error)
GetPayload(ctx context.Context, payloadId [8]byte) (*pb.ExecutionPayload, error)
ExchangeTransitionConfiguration(
@@ -118,7 +114,7 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa

// ForkchoiceUpdated calls the engine_forkchoiceUpdatedV1 method via JSON-RPC.
func (s *Service) ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
ctx context.Context, state *pb.ForkchoiceState, attrs *pb.PayloadAttributes,
) (*pb.PayloadIDBytes, []byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ForkchoiceUpdated")
defer span.End()
@@ -131,31 +127,9 @@ func (s *Service) ForkchoiceUpdated(
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &ForkchoiceUpdatedResponse{}

if attrs == nil {
return nil, nil, errors.New("nil payload attributer")
}
switch attrs.Version() {
case version.Bellatrix:
a, err := attrs.PbV1()
if err != nil {
return nil, nil, err
}
err = s.rpcClient.CallContext(ctx, result, ForkchoiceUpdatedMethod, state, a)
if err != nil {
return nil, nil, handleRPCError(err)
}
case version.Capella:
a, err := attrs.PbV2()
if err != nil {
return nil, nil, err
}
err = s.rpcClient.CallContext(ctx, result, ForkchoiceUpdatedMethodV2, state, a)
if err != nil {
return nil, nil, handleRPCError(err)
}
default:
return nil, nil, fmt.Errorf("unknown payload attribute version: %v", attrs.Version())
err := s.rpcClient.CallContext(ctx, result, ForkchoiceUpdatedMethod, state, attrs)
if err != nil {
return nil, nil, handleRPCError(err)
}

if result.Status == nil {
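
Illustrative sketch (not part of the diff): the fork-aware dispatch that the removed switch above performs, picking a JSON-RPC method name per payload-attribute version. The method strings engine_forkchoiceUpdatedV1 and engine_forkchoiceUpdatedV2 are taken from the constants shown in the diff; the version constants and function name here are hypothetical.

package main

import (
	"errors"
	"fmt"
)

const (
	Bellatrix = iota
	Capella
)

// forkchoiceMethod maps a payload-attribute version to the engine API method.
func forkchoiceMethod(version int) (string, error) {
	switch version {
	case Bellatrix:
		return "engine_forkchoiceUpdatedV1", nil
	case Capella:
		return "engine_forkchoiceUpdatedV2", nil // V2 attributes carry withdrawals
	default:
		return "", errors.New("unknown payload attribute version")
	}
}

func main() {
	m, err := forkchoiceMethod(Capella)
	if err != nil {
		panic(err)
	}
	fmt.Println("JSON-RPC method:", m)
}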

@@ -24,7 +24,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v3/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
@@ -73,19 +72,7 @@ func TestClient_IPC(t *testing.T) {
t.Run(ForkchoiceUpdatedMethod, func(t *testing.T) {
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
p, err := payloadattribute.New(&pb.PayloadAttributes{})
require.NoError(t, err)
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, &pb.ForkchoiceState{}, p)
require.NoError(t, err)
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
require.DeepEqual(t, want.PayloadId, payloadID)
})
t.Run(ForkchoiceUpdatedMethodV2, func(t *testing.T) {
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
p, err := payloadattribute.New(&pb.PayloadAttributesV2{})
require.NoError(t, err)
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, &pb.ForkchoiceState{}, p)
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, &pb.ForkchoiceState{}, &pb.PayloadAttributes{})
require.NoError(t, err)
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
require.DeepEqual(t, want.PayloadId, payloadID)
@@ -181,38 +168,12 @@ func TestClient_HTTP(t *testing.T) {
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
p, err := payloadattribute.New(payloadAttributes)
require.NoError(t, err)
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
srv := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)

// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, forkChoiceState, p)
require.NoError(t, err)
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
require.DeepEqual(t, want.PayloadId, payloadID)
})
t.Run(ForkchoiceUpdatedMethodV2+" VALID status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributesV2{
Timestamp: 1,
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
Withdrawals: []*pb.Withdrawal{{ValidatorIndex: 1, Amount: 1}},
}
p, err := payloadattribute.New(payloadAttributes)
require.NoError(t, err)
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
srv := forkchoiceUpdateSetupV2(t, forkChoiceState, payloadAttributes, want)

// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, forkChoiceState, p)
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.NoError(t, err)
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
require.DeepEqual(t, want.PayloadId, payloadID)
@@ -228,38 +189,12 @@ func TestClient_HTTP(t *testing.T) {
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
p, err := payloadattribute.New(payloadAttributes)
require.NoError(t, err)
want, ok := fix["ForkchoiceUpdatedSyncingResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)

// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, p)
require.ErrorIs(t, err, ErrAcceptedSyncingPayloadStatus)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
})
t.Run(ForkchoiceUpdatedMethodV2+" SYNCING status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributesV2{
Timestamp: 1,
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
Withdrawals: []*pb.Withdrawal{{ValidatorIndex: 1, Amount: 1}},
}
p, err := payloadattribute.New(payloadAttributes)
require.NoError(t, err)
want, ok := fix["ForkchoiceUpdatedSyncingResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
srv := forkchoiceUpdateSetupV2(t, forkChoiceState, payloadAttributes, want)

// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, forkChoiceState, p)
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.ErrorIs(t, err, ErrAcceptedSyncingPayloadStatus)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
@@ -275,14 +210,12 @@ func TestClient_HTTP(t *testing.T) {
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
p, err := payloadattribute.New(payloadAttributes)
require.NoError(t, err)
want, ok := fix["ForkchoiceUpdatedInvalidResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)

// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, p)
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.ErrorIs(t, err, ErrInvalidPayloadStatus)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
@@ -298,14 +231,12 @@ func TestClient_HTTP(t *testing.T) {
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
p, err := payloadattribute.New(payloadAttributes)
require.NoError(t, err)
want, ok := fix["ForkchoiceUpdatedAcceptedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, p)
|
||||
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
|
||||
require.ErrorIs(t, err, ErrUnknownPayloadStatus)
|
||||
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
|
||||
require.DeepEqual(t, []byte(nil), validHash)
|
||||
@@ -1431,18 +1362,6 @@ func (*testEngineService) ForkchoiceUpdatedV1(
|
||||
return item
|
||||
}
|
||||
|
||||
func (*testEngineService) ForkchoiceUpdatedV2(
|
||||
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
|
||||
) *ForkchoiceUpdatedResponse {
|
||||
fix := fixtures()
|
||||
item, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
|
||||
if !ok {
|
||||
panic("not found")
|
||||
}
|
||||
item.Status.Status = pb.PayloadStatus_VALID
|
||||
return item
|
||||
}
|
||||
|
||||
func (*testEngineService) NewPayloadV1(
|
||||
_ context.Context, _ *pb.ExecutionPayload,
|
||||
) *pb.PayloadStatus {
|
||||
@@ -1493,45 +1412,6 @@ func forkchoiceUpdateSetup(t *testing.T, fcs *pb.ForkchoiceState, att *pb.Payloa
|
||||
return service
|
||||
}
|
||||
|
||||
func forkchoiceUpdateSetupV2(t *testing.T, fcs *pb.ForkchoiceState, att *pb.PayloadAttributesV2, res *ForkchoiceUpdatedResponse) *Service {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
|
||||
forkChoiceStateReq, err := json.Marshal(fcs)
|
||||
require.NoError(t, err)
|
||||
payloadAttrsReq, err := json.Marshal(att)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We expect the JSON string RPC request contains the right arguments.
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(forkChoiceStateReq),
|
||||
))
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(payloadAttrsReq),
|
||||
))
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": res,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
return service
|
||||
}
|
||||
|
||||
func newPayloadSetup(t *testing.T, status *pb.PayloadStatus, payload *pb.ExecutionPayload) *Service {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
@@ -551,11 +551,7 @@ func (s *Service) processChainStartIfReady(ctx context.Context, blockHash [32]by
|
||||
|
||||
// savePowchainData saves all powchain related metadata to disk.
|
||||
func (s *Service) savePowchainData(ctx context.Context) error {
|
||||
st, err := s.preGenesisState.ToProtoUnsafe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pbState, err := statenative.ProtobufBeaconStatePhase0(st)
|
||||
pbState, err := statenative.ProtobufBeaconStatePhase0(s.preGenesisState.ToProtoUnsafe())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -777,11 +777,7 @@ func (s *Service) ensureValidPowchainData(ctx context.Context) error {
|
||||
return errors.Wrap(err, "unable to retrieve eth1 data")
|
||||
}
|
||||
if eth1Data == nil || !eth1Data.ChainstartData.Chainstarted || !validateDepositContainers(eth1Data.DepositContainers) {
|
||||
st, err := s.preGenesisState.ToProtoUnsafe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pbState, err := native.ProtobufBeaconStatePhase0(st)
|
||||
pbState, err := native.ProtobufBeaconStatePhase0(s.preGenesisState.ToProtoUnsafe())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -20,7 +20,6 @@ go_library(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/payload-attribute:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
|
||||
payloadattribute "github.com/prysmaticlabs/prysm/v3/consensus-types/payload-attribute"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
|
||||
)
|
||||
@@ -44,7 +43,7 @@ func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData)
|
||||
|
||||
// ForkchoiceUpdated --
|
||||
func (e *EngineClient) ForkchoiceUpdated(
|
||||
_ context.Context, fcs *pb.ForkchoiceState, _ payloadattribute.Attributer,
|
||||
_ context.Context, fcs *pb.ForkchoiceState, _ *pb.PayloadAttributes,
|
||||
) (*pb.PayloadIDBytes, []byte, error) {
|
||||
if e.OverrideValidHash != [32]byte{} && bytesutil.ToBytes32(fcs.HeadBlockHash) == e.OverrideValidHash {
|
||||
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, nil
|
||||
|
||||
@@ -616,8 +616,8 @@ func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
|
||||
return node.payloadHash
|
||||
}
|
||||
|
||||
// ForkChoiceDump returns a full dump of forkchoice.
|
||||
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceDump, error) {
|
||||
// ForkChoiceDump returns a full dump of forkhoice.
|
||||
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceResponse, error) {
|
||||
jc := &v1.Checkpoint{
|
||||
Epoch: f.store.justifiedCheckpoint.Epoch,
|
||||
Root: f.store.justifiedCheckpoint.Root[:],
|
||||
@@ -650,7 +650,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceDump, er
|
||||
if f.store.headNode != nil {
|
||||
headRoot = f.store.headNode.root
|
||||
}
|
||||
resp := &v1.ForkChoiceDump{
|
||||
resp := &v1.ForkChoiceResponse{
|
||||
JustifiedCheckpoint: jc,
|
||||
BestJustifiedCheckpoint: bjc,
|
||||
UnrealizedJustifiedCheckpoint: ujc,
|
||||
@@ -659,7 +659,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceDump, er
|
||||
ProposerBoostRoot: f.store.proposerBoostRoot[:],
|
||||
PreviousProposerBoostRoot: f.store.previousProposerBoostRoot[:],
|
||||
HeadRoot: headRoot[:],
|
||||
ForkChoiceNodes: nodes,
|
||||
ForkchoiceNodes: nodes,
|
||||
}
|
||||
return resp, nil
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ var (
|
||||
nodeCount = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "doublylinkedtree_node_count",
|
||||
Help: "The number of nodes in the doubly linked tree based store structure.",
|
||||
Help: "The number of nodes in the DAG array based store structure.",
|
||||
},
|
||||
)
|
||||
headChangesCount = promauto.NewCounter(
|
||||
|
||||
@@ -138,7 +138,7 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]
|
||||
}
|
||||
thisNode := &v1.ForkChoiceNode{
|
||||
Slot: n.slot,
|
||||
BlockRoot: n.root[:],
|
||||
Root: n.root[:],
|
||||
ParentRoot: parentRoot[:],
|
||||
JustifiedEpoch: n.justifiedEpoch,
|
||||
FinalizedEpoch: n.finalizedEpoch,
|
||||
@@ -147,14 +147,9 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]
|
||||
Balance: n.balance,
|
||||
Weight: n.weight,
|
||||
ExecutionOptimistic: n.optimistic,
|
||||
ExecutionBlockHash: n.payloadHash[:],
|
||||
ExecutionPayload: n.payloadHash[:],
|
||||
Timestamp: n.timestamp,
|
||||
}
|
||||
if n.optimistic {
|
||||
thisNode.Validity = v1.ForkChoiceNodeValidity_OPTIMISTIC
|
||||
} else {
|
||||
thisNode.Validity = v1.ForkChoiceNodeValidity_VALID
|
||||
}
|
||||
|
||||
nodes = append(nodes, thisNode)
|
||||
var err error
|
||||
|
||||
@@ -250,7 +250,7 @@ func TestNode_SetFullyValidated(t *testing.T) {
|
||||
|
||||
for i, respNode := range respNodes {
|
||||
require.Equal(t, storeNodes[i].slot, respNode.Slot)
|
||||
require.DeepEqual(t, storeNodes[i].root[:], respNode.BlockRoot)
|
||||
require.DeepEqual(t, storeNodes[i].root[:], respNode.Root)
|
||||
require.Equal(t, storeNodes[i].balance, respNode.Balance)
|
||||
require.Equal(t, storeNodes[i].weight, respNode.Weight)
|
||||
require.Equal(t, storeNodes[i].optimistic, respNode.ExecutionOptimistic)
|
||||
|
||||
@@ -68,7 +68,7 @@ type Getter interface {
|
||||
HighestReceivedBlockSlot() types.Slot
|
||||
HighestReceivedBlockRoot() [32]byte
|
||||
ReceivedBlocksLastEpoch() (uint64, error)
|
||||
ForkChoiceDump(context.Context) (*v1.ForkChoiceDump, error)
|
||||
ForkChoiceDump(context.Context) (*v1.ForkChoiceResponse, error)
|
||||
VotedFraction(root [32]byte) (uint64, error)
|
||||
}
|
||||
|
||||
|
||||
@@ -32,7 +32,6 @@ go_library(
|
||||
"//beacon-chain/monitor:go_default_library",
|
||||
"//beacon-chain/node/registration:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
|
||||
@@ -34,7 +34,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/monitor"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/node/registration"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
|
||||
@@ -95,7 +94,6 @@ type BeaconNode struct {
|
||||
exitPool voluntaryexits.PoolManager
|
||||
slashingsPool slashings.PoolManager
|
||||
syncCommitteePool synccommittee.Pool
|
||||
blsToExecPool blstoexec.PoolManager
|
||||
depositCache *depositcache.DepositCache
|
||||
proposerIdsCache *cache.ProposerPayloadIDsCache
|
||||
stateFeed *event.Feed
|
||||
@@ -173,7 +171,6 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
exitPool: voluntaryexits.NewPool(),
|
||||
slashingsPool: slashings.NewPool(),
|
||||
syncCommitteePool: synccommittee.NewPool(),
|
||||
blsToExecPool: blstoexec.NewPool(),
|
||||
slasherBlockHeadersFeed: new(event.Feed),
|
||||
slasherAttestationsFeed: new(event.Feed),
|
||||
serviceFlagOpts: &serviceFlagOpts{},
|
||||
@@ -599,7 +596,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer) error
|
||||
blockchain.WithAttestationPool(b.attestationPool),
|
||||
blockchain.WithExitPool(b.exitPool),
|
||||
blockchain.WithSlashingPool(b.slashingsPool),
|
||||
blockchain.WithBLSToExecPool(b.blsToExecPool),
|
||||
blockchain.WithP2PBroadcaster(b.fetchP2P()),
|
||||
blockchain.WithStateNotifier(b),
|
||||
blockchain.WithAttestationService(attService),
|
||||
@@ -678,7 +674,6 @@ func (b *BeaconNode) registerSyncService() error {
|
||||
regularsync.WithExitPool(b.exitPool),
|
||||
regularsync.WithSlashingPool(b.slashingsPool),
|
||||
regularsync.WithSyncCommsPool(b.syncCommitteePool),
|
||||
regularsync.WithBlsToExecPool(b.blsToExecPool),
|
||||
regularsync.WithStateGen(b.stateGen),
|
||||
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
|
||||
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
|
||||
|
||||
@@ -11,15 +11,10 @@ go_library(
|
||||
"//beacon-chain:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/doubly-linked-list:go_default_library",
|
||||
"//crypto/bls/blst:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -29,15 +24,8 @@ go_test(
|
||||
srcs = ["pool_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
|
||||
@@ -4,25 +4,19 @@ import (
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
doublylinkedlist "github.com/prysmaticlabs/prysm/v3/container/doubly-linked-list"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/bls/blst"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// PoolManager maintains pending and seen BLS-to-execution-change objects.
|
||||
// This pool is used by proposers to insert BLS-to-execution-change objects into new blocks.
|
||||
type PoolManager interface {
|
||||
PendingBLSToExecChanges() ([]*ethpb.SignedBLSToExecutionChange, error)
|
||||
BLSToExecChangesForInclusion(state.BeaconState) ([]*ethpb.SignedBLSToExecutionChange, error)
|
||||
BLSToExecChangesForInclusion() ([]*ethpb.SignedBLSToExecutionChange, error)
|
||||
InsertBLSToExecChange(change *ethpb.SignedBLSToExecutionChange)
|
||||
MarkIncluded(change *ethpb.SignedBLSToExecutionChange) error
|
||||
ValidatorExists(idx types.ValidatorIndex) bool
|
||||
}
|
||||
|
||||
// Pool is a concrete implementation of PoolManager.
|
||||
@@ -63,69 +57,25 @@ func (p *Pool) PendingBLSToExecChanges() ([]*ethpb.SignedBLSToExecutionChange, e
|
||||
|
||||
// BLSToExecChangesForInclusion returns objects that are ready for inclusion at the given slot.
|
||||
// This method will not return more than the block enforced MaxBlsToExecutionChanges.
|
||||
func (p *Pool) BLSToExecChangesForInclusion(st state.BeaconState) ([]*ethpb.SignedBLSToExecutionChange, error) {
|
||||
func (p *Pool) BLSToExecChangesForInclusion() ([]*ethpb.SignedBLSToExecutionChange, error) {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
length := int(math.Min(float64(params.BeaconConfig().MaxBlsToExecutionChanges), float64(p.pending.Len())))
|
||||
result := make([]*ethpb.SignedBLSToExecutionChange, 0, length)
|
||||
result := make([]*ethpb.SignedBLSToExecutionChange, length)
|
||||
node := p.pending.First()
|
||||
for node != nil && len(result) < length {
|
||||
change, err := node.Value()
|
||||
var err error
|
||||
for i := 0; node != nil && i < length; i++ {
|
||||
result[i], err = node.Value()
|
||||
if err != nil {
|
||||
p.lock.RUnlock()
|
||||
return nil, err
|
||||
}
|
||||
_, err = blocks.ValidateBLSToExecutionChange(st, change)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Warning("removing invalid BLSToExecutionChange from pool")
|
||||
// MarkIncluded removes the invalid change from the pool
|
||||
p.lock.RUnlock()
|
||||
if err := p.MarkIncluded(change); err != nil {
|
||||
return nil, errors.Wrap(err, "could not mark BLSToExecutionChange as included")
|
||||
}
|
||||
p.lock.RLock()
|
||||
} else {
|
||||
result = append(result, change)
|
||||
}
|
||||
node, err = node.Next()
|
||||
if err != nil {
|
||||
p.lock.RUnlock()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
p.lock.RUnlock()
|
||||
if len(result) == 0 {
|
||||
return result, nil
|
||||
}
|
||||
// We now verify the signatures in batches
|
||||
cSet, err := blocks.BLSChangesSignatureBatch(st, result)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Warning("could not get BLSToExecutionChanges signatures")
|
||||
} else {
|
||||
ok, err := cSet.Verify()
|
||||
if err != nil {
|
||||
logrus.WithError(err).Warning("could not batch verify BLSToExecutionChanges signatures")
|
||||
} else if ok {
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
// Batch signature failed, check signatures individually
|
||||
verified := make([]*ethpb.SignedBLSToExecutionChange, 0, length)
|
||||
for i, sig := range cSet.Signatures {
|
||||
signature, err := blst.SignatureFromBytes(sig)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Warning("could not get signature from bytes")
|
||||
continue
|
||||
}
|
||||
if !signature.Verify(cSet.PublicKeys[i], cSet.Messages[i][:]) {
|
||||
logrus.Warning("removing BLSToExecutionChange with invalid signature from pool")
|
||||
if err := p.MarkIncluded(result[i]); err != nil {
|
||||
return nil, errors.Wrap(err, "could not mark BLSToExecutionChange as included")
|
||||
}
|
||||
} else {
|
||||
verified = append(verified, result[i])
|
||||
}
|
||||
}
|
||||
return verified, nil
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// InsertBLSToExecChange inserts an object into the pool.
|
||||
@@ -157,14 +107,3 @@ func (p *Pool) MarkIncluded(change *ethpb.SignedBLSToExecutionChange) error {
|
||||
p.pending.Remove(node)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidatorExists checks if the bls to execution change object exists
|
||||
// for that particular validator.
|
||||
func (p *Pool) ValidatorExists(idx types.ValidatorIndex) bool {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
node := p.m[idx]
|
||||
|
||||
return node != nil
|
||||
}
|
||||
|
||||
@@ -3,15 +3,8 @@ package blstoexec
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
|
||||
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/bls/common"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
|
||||
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
@@ -43,89 +36,42 @@ func TestPendingBLSToExecChanges(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBLSToExecChangesForInclusion(t *testing.T) {
|
||||
spb := ð.BeaconStateCapella{
|
||||
Fork: ð.Fork{
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
}
|
||||
numValidators := 2 * params.BeaconConfig().MaxBlsToExecutionChanges
|
||||
validators := make([]*eth.Validator, numValidators)
|
||||
blsChanges := make([]*eth.BLSToExecutionChange, numValidators)
|
||||
spb.Balances = make([]uint64, numValidators)
|
||||
privKeys := make([]common.SecretKey, numValidators)
|
||||
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
|
||||
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}
|
||||
|
||||
for i := range validators {
|
||||
v := ð.Validator{}
|
||||
v.EffectiveBalance = maxEffectiveBalance
|
||||
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
v.WithdrawalCredentials = make([]byte, 32)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
privKeys[i] = priv
|
||||
pubkey := priv.PublicKey().Marshal()
|
||||
|
||||
message := ð.BLSToExecutionChange{
|
||||
ToExecutionAddress: executionAddress,
|
||||
ValidatorIndex: types.ValidatorIndex(i),
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
|
||||
digest := hashFn.Hash(pubkey)
|
||||
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
copy(v.WithdrawalCredentials, digest[:])
|
||||
validators[i] = v
|
||||
blsChanges[i] = message
|
||||
}
|
||||
spb.Validators = validators
|
||||
st, err := state_native.InitializeFromProtoCapella(spb)
|
||||
require.NoError(t, err)
|
||||
|
||||
signedChanges := make([]*eth.SignedBLSToExecutionChange, numValidators)
|
||||
for i, message := range blsChanges {
|
||||
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
|
||||
require.NoError(t, err)
|
||||
|
||||
signed := ð.SignedBLSToExecutionChange{
|
||||
Message: message,
|
||||
Signature: signature,
|
||||
}
|
||||
signedChanges[i] = signed
|
||||
}
|
||||
|
||||
t.Run("empty pool", func(t *testing.T) {
|
||||
pool := NewPool()
|
||||
changes, err := pool.BLSToExecChangesForInclusion(st)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(changes))
|
||||
})
|
||||
t.Run("Less than MaxBlsToExecutionChanges in pool", func(t *testing.T) {
|
||||
pool := NewPool()
|
||||
for i := uint64(0); i < params.BeaconConfig().MaxBlsToExecutionChanges-1; i++ {
|
||||
pool.InsertBLSToExecChange(signedChanges[i])
|
||||
pool.InsertBLSToExecChange(ð.SignedBLSToExecutionChange{
|
||||
Message: ð.BLSToExecutionChange{
|
||||
ValidatorIndex: types.ValidatorIndex(i),
|
||||
},
|
||||
})
|
||||
}
|
||||
changes, err := pool.BLSToExecChangesForInclusion(st)
|
||||
changes, err := pool.BLSToExecChangesForInclusion()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges)-1, len(changes))
|
||||
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges-1), len(changes))
|
||||
})
|
||||
t.Run("MaxBlsToExecutionChanges in pool", func(t *testing.T) {
|
||||
pool := NewPool()
|
||||
for i := uint64(0); i < params.BeaconConfig().MaxBlsToExecutionChanges; i++ {
|
||||
pool.InsertBLSToExecChange(signedChanges[i])
|
||||
pool.InsertBLSToExecChange(ð.SignedBLSToExecutionChange{
|
||||
Message: ð.BLSToExecutionChange{
|
||||
ValidatorIndex: types.ValidatorIndex(i),
|
||||
},
|
||||
})
|
||||
}
|
||||
changes, err := pool.BLSToExecChangesForInclusion(st)
|
||||
changes, err := pool.BLSToExecChangesForInclusion()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges), len(changes))
|
||||
})
|
||||
t.Run("more than MaxBlsToExecutionChanges in pool", func(t *testing.T) {
|
||||
pool := NewPool()
|
||||
for i := uint64(0); i < numValidators; i++ {
|
||||
pool.InsertBLSToExecChange(signedChanges[i])
|
||||
for i := uint64(0); i < params.BeaconConfig().MaxBlsToExecutionChanges+1; i++ {
|
||||
pool.InsertBLSToExecChange(ð.SignedBLSToExecutionChange{
|
||||
Message: ð.BLSToExecutionChange{
|
||||
ValidatorIndex: types.ValidatorIndex(i),
|
||||
},
|
||||
})
|
||||
}
|
||||
changes, err := pool.BLSToExecChangesForInclusion(st)
|
||||
changes, err := pool.BLSToExecChangesForInclusion()
|
||||
require.NoError(t, err)
|
||||
// We want FIFO semantics, which means validator with index 16 shouldn't be returned
|
||||
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges), len(changes))
|
||||
@@ -133,30 +79,6 @@ func TestBLSToExecChangesForInclusion(t *testing.T) {
|
||||
assert.NotEqual(t, types.ValidatorIndex(16), ch.Message.ValidatorIndex)
|
||||
}
|
||||
})
|
||||
t.Run("One Bad change", func(t *testing.T) {
|
||||
pool := NewPool()
|
||||
saveByte := signedChanges[1].Message.FromBlsPubkey[5]
|
||||
signedChanges[1].Message.FromBlsPubkey[5] = 0xff
|
||||
for i := uint64(0); i < numValidators; i++ {
|
||||
pool.InsertBLSToExecChange(signedChanges[i])
|
||||
}
|
||||
changes, err := pool.BLSToExecChangesForInclusion(st)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges), len(changes))
|
||||
assert.Equal(t, types.ValidatorIndex(2), changes[1].Message.ValidatorIndex)
|
||||
signedChanges[1].Message.FromBlsPubkey[5] = saveByte
|
||||
})
|
||||
t.Run("One Bad Signature", func(t *testing.T) {
|
||||
pool := NewPool()
|
||||
copy(signedChanges[1].Signature, signedChanges[2].Signature)
|
||||
for i := uint64(0); i < numValidators; i++ {
|
||||
pool.InsertBLSToExecChange(signedChanges[i])
|
||||
}
|
||||
changes, err := pool.BLSToExecChangesForInclusion(st)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges)-1, len(changes))
|
||||
assert.Equal(t, types.ValidatorIndex(2), changes[1].Message.ValidatorIndex)
|
||||
})
|
||||
}
|
||||
|
||||
func TestInsertBLSToExecChange(t *testing.T) {
|
||||
@@ -334,76 +256,3 @@ func TestMarkIncluded(t *testing.T) {
|
||||
assert.NotNil(t, pool.m[1])
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidatorExists(t *testing.T) {
|
||||
t.Run("no validators in pool", func(t *testing.T) {
|
||||
pool := NewPool()
|
||||
assert.Equal(t, false, pool.ValidatorExists(0))
|
||||
})
|
||||
t.Run("validator added to pool", func(t *testing.T) {
|
||||
pool := NewPool()
|
||||
change := ð.SignedBLSToExecutionChange{
|
||||
Message: ð.BLSToExecutionChange{
|
||||
ValidatorIndex: types.ValidatorIndex(0),
|
||||
}}
|
||||
pool.InsertBLSToExecChange(change)
|
||||
assert.Equal(t, true, pool.ValidatorExists(0))
|
||||
})
|
||||
t.Run("multiple validators added to pool", func(t *testing.T) {
|
||||
pool := NewPool()
|
||||
change := ð.SignedBLSToExecutionChange{
|
||||
Message: ð.BLSToExecutionChange{
|
||||
ValidatorIndex: types.ValidatorIndex(0),
|
||||
}}
|
||||
pool.InsertBLSToExecChange(change)
|
||||
change = ð.SignedBLSToExecutionChange{
|
||||
Message: ð.BLSToExecutionChange{
|
||||
ValidatorIndex: types.ValidatorIndex(10),
|
||||
}}
|
||||
pool.InsertBLSToExecChange(change)
|
||||
change = ð.SignedBLSToExecutionChange{
|
||||
Message: ð.BLSToExecutionChange{
|
||||
ValidatorIndex: types.ValidatorIndex(30),
|
||||
}}
|
||||
pool.InsertBLSToExecChange(change)
|
||||
|
||||
assert.Equal(t, true, pool.ValidatorExists(0))
|
||||
assert.Equal(t, true, pool.ValidatorExists(10))
|
||||
assert.Equal(t, true, pool.ValidatorExists(30))
|
||||
})
|
||||
t.Run("validator added and then removed", func(t *testing.T) {
|
||||
pool := NewPool()
|
||||
change := ð.SignedBLSToExecutionChange{
|
||||
Message: ð.BLSToExecutionChange{
|
||||
ValidatorIndex: types.ValidatorIndex(0),
|
||||
}}
|
||||
pool.InsertBLSToExecChange(change)
|
||||
require.NoError(t, pool.MarkIncluded(change))
|
||||
assert.Equal(t, false, pool.ValidatorExists(0))
|
||||
})
|
||||
t.Run("multiple validators added to pool and removed", func(t *testing.T) {
|
||||
pool := NewPool()
|
||||
firstChange := ð.SignedBLSToExecutionChange{
|
||||
Message: ð.BLSToExecutionChange{
|
||||
ValidatorIndex: types.ValidatorIndex(0),
|
||||
}}
|
||||
pool.InsertBLSToExecChange(firstChange)
|
||||
secondChange := ð.SignedBLSToExecutionChange{
|
||||
Message: ð.BLSToExecutionChange{
|
||||
ValidatorIndex: types.ValidatorIndex(10),
|
||||
}}
|
||||
pool.InsertBLSToExecChange(secondChange)
|
||||
thirdChange := ð.SignedBLSToExecutionChange{
|
||||
Message: ð.BLSToExecutionChange{
|
||||
ValidatorIndex: types.ValidatorIndex(30),
|
||||
}}
|
||||
pool.InsertBLSToExecChange(thirdChange)
|
||||
|
||||
assert.NoError(t, pool.MarkIncluded(firstChange))
|
||||
assert.NoError(t, pool.MarkIncluded(thirdChange))
|
||||
|
||||
assert.Equal(t, false, pool.ValidatorExists(0))
|
||||
assert.Equal(t, true, pool.ValidatorExists(10))
|
||||
assert.Equal(t, false, pool.ValidatorExists(30))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -174,7 +174,6 @@ go_test(
|
||||
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/host/blank:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/net/swarm/testing:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
|
||||
|
||||
@@ -41,10 +41,6 @@ func (s *Service) InterceptAddrDial(pid peer.ID, m multiaddr.Multiaddr) (allow b
|
||||
|
||||
// InterceptAccept checks whether the incidental inbound connection is allowed.
|
||||
func (s *Service) InterceptAccept(n network.ConnMultiaddrs) (allow bool) {
|
||||
// Deny all incoming connections before we are ready
|
||||
if !s.started {
|
||||
return false
|
||||
}
|
||||
if !s.validateDial(n.RemoteMultiaddr()) {
|
||||
// Allow other go-routines to run in the event
|
||||
// we receive a large amount of junk connections.
|
||||
|
||||
@@ -40,7 +40,6 @@ func TestPeer_AtMaxLimit(t *testing.T) {
|
||||
s.cfg = &Config{MaxPeers: 0}
|
||||
s.addrFilter, err = configureFilter(&Config{})
|
||||
require.NoError(t, err)
|
||||
s.started = true
|
||||
h1, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.ConnectionGater(s)}...)
|
||||
require.NoError(t, err)
|
||||
s.host = h1
|
||||
@@ -84,7 +83,6 @@ func TestService_InterceptBannedIP(t *testing.T) {
|
||||
ip := "212.67.10.122"
|
||||
multiAddress, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ip, 3000))
|
||||
require.NoError(t, err)
|
||||
s.started = true
|
||||
|
||||
for i := 0; i < ipBurst; i++ {
|
||||
valid := s.validateDial(multiAddress)
|
||||
@@ -98,37 +96,6 @@ func TestService_InterceptBannedIP(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_RejectInboundConnectionBeforeStarted(t *testing.T) {
|
||||
limit := 1
|
||||
s := &Service{
|
||||
ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false),
|
||||
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: limit,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: mockp2p.NewTestP2P(t).BHost,
|
||||
cfg: &Config{MaxPeers: uint(limit)},
|
||||
}
|
||||
var err error
|
||||
s.addrFilter, err = configureFilter(&Config{})
|
||||
require.NoError(t, err)
|
||||
|
||||
ip := "212.67.10.122"
|
||||
multiAddress, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ip, 3000))
|
||||
require.NoError(t, err)
|
||||
|
||||
valid := s.InterceptAccept(&maEndpoints{raddr: multiAddress})
|
||||
if valid {
|
||||
t.Errorf("Expected multiaddress with ip %s to be rejected as p2p service is not ready", ip)
|
||||
}
|
||||
|
||||
s.started = true
|
||||
valid = s.InterceptAccept(&maEndpoints{raddr: multiAddress})
|
||||
if !valid {
|
||||
t.Errorf("Expected multiaddress with ip %s to be accepted after service is started", ip)
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
|
||||
limit := 20
|
||||
s := &Service{
|
||||
@@ -146,7 +113,6 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
|
||||
ip := "212.67.10.122"
|
||||
multiAddress, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ip, 3000))
|
||||
require.NoError(t, err)
|
||||
s.started = true
|
||||
|
||||
valid := s.InterceptAccept(&maEndpoints{raddr: multiAddress})
|
||||
if !valid {
|
||||
@@ -191,7 +157,6 @@ func TestPeer_BelowMaxLimit(t *testing.T) {
|
||||
h1, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.ConnectionGater(s)}...)
|
||||
require.NoError(t, err)
|
||||
s.host = h1
|
||||
s.started = true
|
||||
defer func() {
|
||||
err := h1.Close()
|
||||
require.NoError(t, err)
|
||||
@@ -237,7 +202,6 @@ func TestPeerAllowList(t *testing.T) {
|
||||
h1, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.ConnectionGater(s)}...)
|
||||
require.NoError(t, err)
|
||||
s.host = h1
|
||||
s.started = true
|
||||
defer func() {
|
||||
err := h1.Close()
|
||||
require.NoError(t, err)
|
||||
@@ -284,7 +248,6 @@ func TestPeerDenyList(t *testing.T) {
|
||||
h1, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.ConnectionGater(s)}...)
|
||||
require.NoError(t, err)
|
||||
s.host = h1
|
||||
s.started = true
|
||||
defer func() {
|
||||
err := h1.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -115,9 +115,7 @@ func TestSszNetworkEncoder_DecodeWithMultipleFrames(t *testing.T) {
|
||||
maxChunkSize := uint64(1 << 22)
|
||||
encoder.MaxChunkSize = maxChunkSize
|
||||
params.OverrideBeaconNetworkConfig(c)
|
||||
pb, err := st.ToProtoUnsafe()
|
||||
require.NoError(t, err)
|
||||
_, err = e.EncodeWithMaxLength(buf, pb.(*ethpb.BeaconState))
|
||||
_, err := e.EncodeWithMaxLength(buf, st.ToProtoUnsafe().(*ethpb.BeaconState))
|
||||
require.NoError(t, err)
|
||||
// Max snappy block size
|
||||
if buf.Len() <= 76490 {
|
||||
|
||||
@@ -41,9 +41,6 @@ const (
|
||||
// voluntaryExitWeight specifies the scoring weight that we apply to
|
||||
// our voluntary exit topic.
|
||||
voluntaryExitWeight = 0.05
|
||||
// blsToExecutionChangeWeight specifies the scoring weight that we apply to
|
||||
// our bls to execution topic.
|
||||
blsToExecutionChangeWeight = 0.05
|
||||
|
||||
// maxInMeshScore describes the max score a peer can attain from being in the mesh.
|
||||
maxInMeshScore = 10
|
||||
@@ -119,8 +116,6 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
|
||||
return defaultProposerSlashingTopicParams(), nil
|
||||
case strings.Contains(topic, GossipAttesterSlashingMessage):
|
||||
return defaultAttesterSlashingTopicParams(), nil
|
||||
case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
|
||||
return defaultBlsToExecutionChangeTopicParams(), nil
|
||||
default:
|
||||
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
|
||||
}
|
||||
@@ -478,28 +473,6 @@ func defaultVoluntaryExitTopicParams() *pubsub.TopicScoreParams {
|
||||
}
|
||||
}
|
||||
|
||||
func defaultBlsToExecutionChangeTopicParams() *pubsub.TopicScoreParams {
|
||||
return &pubsub.TopicScoreParams{
|
||||
TopicWeight: blsToExecutionChangeWeight,
|
||||
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
|
||||
TimeInMeshQuantum: inMeshTime(),
|
||||
TimeInMeshCap: inMeshCap(),
|
||||
FirstMessageDeliveriesWeight: 2,
|
||||
FirstMessageDeliveriesDecay: scoreDecay(oneHundredEpochs),
|
||||
FirstMessageDeliveriesCap: 5,
|
||||
MeshMessageDeliveriesWeight: 0,
|
||||
MeshMessageDeliveriesDecay: 0,
|
||||
MeshMessageDeliveriesCap: 0,
|
||||
MeshMessageDeliveriesThreshold: 0,
|
||||
MeshMessageDeliveriesWindow: 0,
|
||||
MeshMessageDeliveriesActivation: 0,
|
||||
MeshFailurePenaltyWeight: 0,
|
||||
MeshFailurePenaltyDecay: 0,
|
||||
InvalidMessageDeliveriesWeight: -2000,
|
||||
InvalidMessageDeliveriesDecay: scoreDecay(invalidDecayPeriod),
|
||||
}
|
||||
}
|
||||
|
||||
func oneSlotDuration() time.Duration {
|
||||
return time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
}
|
||||
@@ -558,7 +531,7 @@ func scoreByWeight(weight, threshold float64) float64 {
|
||||
func maxScore() float64 {
|
||||
totalWeight := beaconBlockWeight + aggregateWeight + syncContributionWeight +
|
||||
attestationTotalWeight + syncCommitteesTotalWeight + attesterSlashingWeight +
|
||||
proposerSlashingWeight + voluntaryExitWeight + blsToExecutionChangeWeight
|
||||
proposerSlashingWeight + voluntaryExitWeight
|
||||
return (maxInMeshScore + maxFirstDeliveryScore) * totalWeight
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,6 @@ var gossipTopicMappings = map[string]proto.Message{
|
||||
AggregateAndProofSubnetTopicFormat: ðpb.SignedAggregateAttestationAndProof{},
|
||||
SyncContributionAndProofSubnetTopicFormat: ðpb.SignedContributionAndProof{},
|
||||
SyncCommitteeSubnetTopicFormat: ðpb.SyncCommitteeMessage{},
|
||||
BlsToExecutionChangeSubnetTopicFormat: ðpb.SignedBLSToExecutionChange{},
|
||||
}
|
||||
|
||||
// GossipTopicMappings is a function to return the assigned data type
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
ecdsaprysm "github.com/prysmaticlabs/prysm/v3/crypto/ecdsa"
|
||||
@@ -96,7 +95,7 @@ func TestDefaultMultiplexers(t *testing.T) {
|
||||
err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[0].ID)
|
||||
assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[1].ID)
|
||||
assert.Equal(t, "/mplex/6.7.0", cfg.Muxers[0].ID)
|
||||
assert.Equal(t, "/yamux/1.0.0", cfg.Muxers[1].ID)
|
||||
|
||||
}
|
||||
|
||||
@@ -26,8 +26,6 @@ const (
|
||||
GossipAggregateAndProofMessage = "beacon_aggregate_and_proof"
|
||||
// GossipContributionAndProofMessage is the name for the sync contribution and proof message type.
|
||||
GossipContributionAndProofMessage = "sync_committee_contribution_and_proof"
|
||||
// GossipBlsToExecutionChangeMessage is the name for the bls to execution change message type.
|
||||
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
|
||||
|
||||
// Topic Formats
|
||||
//
|
||||
@@ -47,6 +45,4 @@ const (
|
||||
AggregateAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipAggregateAndProofMessage
|
||||
// SyncContributionAndProofSubnetTopicFormat is the topic format for the sync aggregate and proof subnet.
|
||||
SyncContributionAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipContributionAndProofMessage
|
||||
// BlsToExecutionChangeSubnetTopicFormat is the topic format for the bls to execution change subnet.
|
||||
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
|
||||
)
|
||||
|
||||
@@ -445,12 +445,6 @@ type bellatrixBlindedBlockResponseJson struct {
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
type capellaBlindedBlockResponseJson struct {
|
||||
Version string `json:"version"`
|
||||
Data *SignedBlindedBeaconBlockCapellaContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
||||
respContainer, ok := response.(*BlockV2ResponseJson)
|
||||
if !ok {
|
||||
@@ -532,15 +526,6 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
|
||||
},
|
||||
ExecutionOptimistic: respContainer.ExecutionOptimistic,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
|
||||
actualRespContainer = &capellaBlindedBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: &SignedBlindedBeaconBlockCapellaContainerJson{
|
||||
Message: respContainer.Data.CapellaBlock,
|
||||
Signature: respContainer.Data.Signature,
|
||||
},
|
||||
ExecutionOptimistic: respContainer.ExecutionOptimistic,
|
||||
}
|
||||
default:
|
||||
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
|
||||
}
|
||||
@@ -567,11 +552,6 @@ type bellatrixStateResponseJson struct {
|
||||
Data *BeaconStateBellatrixJson `json:"data"`
|
||||
}
|
||||
|
||||
type capellaStateResponseJson struct {
|
||||
Version string `json:"version"`
|
||||
Data *BeaconStateCapellaJson `json:"data"`
|
||||
}
|
||||
|
||||
func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
||||
respContainer, ok := response.(*BeaconStateV2ResponseJson)
|
||||
if !ok {
|
||||
@@ -595,11 +575,6 @@ func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, a
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.BellatrixState,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
|
||||
actualRespContainer = &capellaStateResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.CapellaState,
|
||||
}
|
||||
default:
|
||||
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported state version '%s'", respContainer.Version))
|
||||
}
|
||||
@@ -698,50 +673,3 @@ func serializeProducedBlindedBlock(response interface{}) (apimiddleware.RunDefau
|
||||
}
|
||||
return false, j, nil
|
||||
}
|
||||
|
||||
func prepareForkChoiceResponse(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
||||
dump, ok := response.(*ForkChoiceDumpJson)
|
||||
if !ok {
|
||||
return false, nil, apimiddleware.InternalServerError(errors.New("response is not of the correct type"))
|
||||
}
|
||||
|
||||
nodes := make([]*ForkChoiceNodeResponseJson, len(dump.ForkChoiceNodes))
|
||||
for i, n := range dump.ForkChoiceNodes {
|
||||
nodes[i] = &ForkChoiceNodeResponseJson{
|
||||
Slot: n.Slot,
|
||||
BlockRoot: n.BlockRoot,
|
||||
ParentRoot: n.ParentRoot,
|
||||
JustifiedEpoch: n.JustifiedEpoch,
|
||||
FinalizedEpoch: n.FinalizedEpoch,
|
||||
Weight: n.Weight,
|
||||
Validity: n.Validity,
|
||||
ExecutionBlockHash: n.ExecutionBlockHash,
|
||||
ExtraData: &ForkChoiceNodeExtraDataJson{
|
||||
UnrealizedJustifiedEpoch: n.UnrealizedJustifiedEpoch,
|
||||
UnrealizedFinalizedEpoch: n.UnrealizedFinalizedEpoch,
|
||||
Balance: n.Balance,
|
||||
ExecutionOptimistic: n.ExecutionOptimistic,
|
||||
TimeStamp: n.TimeStamp,
|
||||
},
|
||||
}
|
||||
}
|
||||
forkChoice := &ForkChoiceResponseJson{
|
||||
JustifiedCheckpoint: dump.JustifiedCheckpoint,
|
||||
FinalizedCheckpoint: dump.FinalizedCheckpoint,
|
||||
ForkChoiceNodes: nodes,
|
||||
ExtraData: &ForkChoiceResponseExtraDataJson{
|
||||
BestJustifiedCheckpoint: dump.BestJustifiedCheckpoint,
|
||||
UnrealizedJustifiedCheckpoint: dump.UnrealizedJustifiedCheckpoint,
|
||||
UnrealizedFinalizedCheckpoint: dump.UnrealizedFinalizedCheckpoint,
|
||||
ProposerBoostRoot: dump.ProposerBoostRoot,
|
||||
PreviousProposerBoostRoot: dump.PreviousProposerBoostRoot,
|
||||
HeadRoot: dump.HeadRoot,
|
||||
},
|
||||
}
|
||||
|
||||
result, err := json.Marshal(forkChoice)
|
||||
if err != nil {
|
||||
return false, nil, apimiddleware.InternalServerError(errors.New("could not marshal fork choice to JSON"))
|
||||
}
|
||||
return false, result, nil
|
||||
}
|
||||
|
||||
@@ -814,74 +814,6 @@ func TestSerializeBlindedBlock(t *testing.T) {
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
response := &BlindedBlockResponseJson{
|
||||
Version: ethpbv2.Version_CAPELLA.String(),
|
||||
Data: &SignedBlindedBeaconBlockContainerJson{
|
||||
CapellaBlock: &BlindedBeaconBlockCapellaJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BlindedBeaconBlockBodyCapellaJson{
|
||||
ExecutionPayloadHeader: &ExecutionPayloadHeaderCapellaJson{
|
||||
ParentHash: "parent_hash",
|
||||
FeeRecipient: "fee_recipient",
|
||||
StateRoot: "state_root",
|
||||
ReceiptsRoot: "receipts_root",
|
||||
LogsBloom: "logs_bloom",
|
||||
PrevRandao: "prev_randao",
|
||||
BlockNumber: "block_number",
|
||||
GasLimit: "gas_limit",
|
||||
GasUsed: "gas_used",
|
||||
TimeStamp: "time_stamp",
|
||||
ExtraData: "extra_data",
|
||||
BaseFeePerGas: "base_fee_per_gas",
|
||||
BlockHash: "block_hash",
|
||||
TransactionsRoot: "transactions_root",
|
||||
WithdrawalsRoot: "withdrawals_root",
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: "sig",
|
||||
},
|
||||
ExecutionOptimistic: true,
|
||||
}
|
||||
runDefault, j, errJson := serializeBlindedBlock(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &capellaBlindedBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data.Message)
|
||||
beaconBlock := resp.Data.Message
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
assert.NotNil(t, beaconBlock.Body)
|
||||
payloadHeader := beaconBlock.Body.ExecutionPayloadHeader
|
||||
assert.NotNil(t, payloadHeader)
|
||||
assert.Equal(t, "parent_hash", payloadHeader.ParentHash)
|
||||
assert.Equal(t, "fee_recipient", payloadHeader.FeeRecipient)
|
||||
assert.Equal(t, "state_root", payloadHeader.StateRoot)
|
||||
assert.Equal(t, "receipts_root", payloadHeader.ReceiptsRoot)
|
||||
assert.Equal(t, "logs_bloom", payloadHeader.LogsBloom)
|
||||
assert.Equal(t, "prev_randao", payloadHeader.PrevRandao)
|
||||
assert.Equal(t, "block_number", payloadHeader.BlockNumber)
|
||||
assert.Equal(t, "gas_limit", payloadHeader.GasLimit)
|
||||
assert.Equal(t, "gas_used", payloadHeader.GasUsed)
|
||||
assert.Equal(t, "time_stamp", payloadHeader.TimeStamp)
|
||||
assert.Equal(t, "extra_data", payloadHeader.ExtraData)
|
||||
assert.Equal(t, "base_fee_per_gas", payloadHeader.BaseFeePerGas)
|
||||
assert.Equal(t, "block_hash", payloadHeader.BlockHash)
|
||||
assert.Equal(t, "transactions_root", payloadHeader.TransactionsRoot)
|
||||
assert.Equal(t, "withdrawals_root", payloadHeader.WithdrawalsRoot)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
|
||||
})
|
||||
|
||||
t.Run("incorrect response type", func(t *testing.T) {
|
||||
response := &types.Empty{}
|
||||
runDefault, j, errJson := serializeBlindedBlock(response)
|
||||
@@ -949,21 +881,6 @@ func TestSerializeV2State(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(j, &bellatrixStateResponseJson{}))
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
response := &BeaconStateV2ResponseJson{
|
||||
Version: ethpbv2.Version_CAPELLA.String(),
|
||||
Data: &BeaconStateContainerV2Json{
|
||||
Phase0State: nil,
|
||||
CapellaState: &BeaconStateCapellaJson{},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeV2State(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
require.NoError(t, json.Unmarshal(j, &capellaStateResponseJson{}))
|
||||
})
|
||||
|
||||
t.Run("incorrect response type", func(t *testing.T) {
|
||||
runDefault, j, errJson := serializeV2State(&types.Empty{})
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
@@ -1202,113 +1119,3 @@ func TestSerializeProduceBlindedBlock(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block version"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrepareForkChoiceResponse(t *testing.T) {
|
||||
dump := &ForkChoiceDumpJson{
|
||||
JustifiedCheckpoint: &CheckpointJson{
|
||||
Epoch: "justified",
|
||||
Root: "justified",
|
||||
},
|
||||
FinalizedCheckpoint: &CheckpointJson{
|
||||
Epoch: "finalized",
|
||||
Root: "finalized",
|
||||
},
|
||||
BestJustifiedCheckpoint: &CheckpointJson{
|
||||
Epoch: "best_justified",
|
||||
Root: "best_justified",
|
||||
},
|
||||
UnrealizedJustifiedCheckpoint: &CheckpointJson{
|
||||
Epoch: "unrealized_justified",
|
||||
Root: "unrealized_justified",
|
||||
},
|
||||
UnrealizedFinalizedCheckpoint: &CheckpointJson{
|
||||
Epoch: "unrealized_finalized",
|
||||
Root: "unrealized_finalized",
|
||||
},
|
||||
ProposerBoostRoot: "proposer_boost_root",
|
||||
PreviousProposerBoostRoot: "previous_proposer_boost_root",
|
||||
HeadRoot: "head_root",
|
||||
ForkChoiceNodes: []*ForkChoiceNodeJson{
|
||||
{
|
||||
Slot: "node1_slot",
|
||||
BlockRoot: "node1_block_root",
|
||||
ParentRoot: "node1_parent_root",
|
||||
JustifiedEpoch: "node1_justified_epoch",
|
||||
FinalizedEpoch: "node1_finalized_epoch",
|
||||
UnrealizedJustifiedEpoch: "node1_unrealized_justified_epoch",
|
||||
UnrealizedFinalizedEpoch: "node1_unrealized_finalized_epoch",
|
||||
Balance: "node1_balance",
|
||||
Weight: "node1_weight",
|
||||
ExecutionOptimistic: false,
|
||||
ExecutionBlockHash: "node1_execution_block_hash",
|
||||
TimeStamp: "node1_time_stamp",
|
||||
Validity: "node1_validity",
|
||||
},
|
||||
{
|
||||
Slot: "node2_slot",
|
||||
BlockRoot: "node2_block_root",
|
||||
ParentRoot: "node2_parent_root",
|
||||
JustifiedEpoch: "node2_justified_epoch",
|
||||
FinalizedEpoch: "node2_finalized_epoch",
|
||||
UnrealizedJustifiedEpoch: "node2_unrealized_justified_epoch",
|
||||
UnrealizedFinalizedEpoch: "node2_unrealized_finalized_epoch",
|
||||
Balance: "node2_balance",
|
||||
Weight: "node2_weight",
|
||||
ExecutionOptimistic: true,
|
||||
ExecutionBlockHash: "node2_execution_block_hash",
|
||||
TimeStamp: "node2_time_stamp",
|
||||
Validity: "node2_validity",
|
||||
},
|
||||
},
|
||||
}
|
||||
runDefault, j, errorJson := prepareForkChoiceResponse(dump)
|
||||
assert.Equal(t, nil, errorJson)
|
||||
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
result := &ForkChoiceResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, result))
|
||||
require.NotNil(t, result)
|
||||
assert.Equal(t, "justified", result.JustifiedCheckpoint.Epoch)
|
||||
assert.Equal(t, "justified", result.JustifiedCheckpoint.Root)
|
||||
assert.Equal(t, "finalized", result.FinalizedCheckpoint.Epoch)
|
||||
assert.Equal(t, "finalized", result.FinalizedCheckpoint.Root)
|
||||
assert.Equal(t, "best_justified", result.ExtraData.BestJustifiedCheckpoint.Epoch)
|
||||
assert.Equal(t, "best_justified", result.ExtraData.BestJustifiedCheckpoint.Root)
|
||||
assert.Equal(t, "unrealized_justified", result.ExtraData.UnrealizedJustifiedCheckpoint.Epoch)
|
||||
assert.Equal(t, "unrealized_justified", result.ExtraData.UnrealizedJustifiedCheckpoint.Root)
|
||||
assert.Equal(t, "unrealized_finalized", result.ExtraData.UnrealizedFinalizedCheckpoint.Epoch)
|
||||
assert.Equal(t, "unrealized_finalized", result.ExtraData.UnrealizedFinalizedCheckpoint.Root)
|
||||
assert.Equal(t, "proposer_boost_root", result.ExtraData.ProposerBoostRoot)
|
||||
assert.Equal(t, "previous_proposer_boost_root", result.ExtraData.PreviousProposerBoostRoot)
|
||||
assert.Equal(t, "head_root", result.ExtraData.HeadRoot)
|
||||
require.Equal(t, 2, len(result.ForkChoiceNodes))
|
||||
node1 := result.ForkChoiceNodes[0]
|
||||
require.NotNil(t, node1)
|
||||
assert.Equal(t, "node1_slot", node1.Slot)
|
||||
assert.Equal(t, "node1_block_root", node1.BlockRoot)
|
||||
assert.Equal(t, "node1_parent_root", node1.ParentRoot)
|
||||
assert.Equal(t, "node1_justified_epoch", node1.JustifiedEpoch)
|
||||
assert.Equal(t, "node1_finalized_epoch", node1.FinalizedEpoch)
|
||||
assert.Equal(t, "node1_unrealized_justified_epoch", node1.ExtraData.UnrealizedJustifiedEpoch)
|
||||
assert.Equal(t, "node1_unrealized_finalized_epoch", node1.ExtraData.UnrealizedFinalizedEpoch)
|
||||
assert.Equal(t, "node1_balance", node1.ExtraData.Balance)
|
||||
assert.Equal(t, "node1_weight", node1.Weight)
|
||||
assert.Equal(t, false, node1.ExtraData.ExecutionOptimistic)
|
||||
assert.Equal(t, "node1_execution_block_hash", node1.ExecutionBlockHash)
|
||||
assert.Equal(t, "node1_time_stamp", node1.ExtraData.TimeStamp)
|
||||
assert.Equal(t, "node1_validity", node1.Validity)
|
||||
node2 := result.ForkChoiceNodes[1]
|
||||
require.NotNil(t, node2)
|
||||
assert.Equal(t, "node2_slot", node2.Slot)
|
||||
assert.Equal(t, "node2_block_root", node2.BlockRoot)
|
||||
assert.Equal(t, "node2_parent_root", node2.ParentRoot)
|
||||
assert.Equal(t, "node2_justified_epoch", node2.JustifiedEpoch)
|
||||
assert.Equal(t, "node2_finalized_epoch", node2.FinalizedEpoch)
|
||||
assert.Equal(t, "node2_unrealized_justified_epoch", node2.ExtraData.UnrealizedJustifiedEpoch)
|
||||
assert.Equal(t, "node2_unrealized_finalized_epoch", node2.ExtraData.UnrealizedFinalizedEpoch)
|
||||
assert.Equal(t, "node2_balance", node2.ExtraData.Balance)
|
||||
assert.Equal(t, "node2_weight", node2.Weight)
|
||||
assert.Equal(t, true, node2.ExtraData.ExecutionOptimistic)
|
||||
assert.Equal(t, "node2_execution_block_hash", node2.ExecutionBlockHash)
|
||||
assert.Equal(t, "node2_time_stamp", node2.ExtraData.TimeStamp)
|
||||
assert.Equal(t, "node2_validity", node2.Validity)
|
||||
}
|
||||
|
||||
@@ -52,7 +52,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
		"/eth/v2/debug/beacon/states/{state_id}",
		"/eth/v1/debug/beacon/heads",
		"/eth/v2/debug/beacon/heads",
		"/eth/v1/debug/forkchoice",
		"/eth/v1/debug/beacon/forkchoice",
		"/eth/v1/config/fork_schedule",
		"/eth/v1/config/deposit_contract",
		"/eth/v1/config/spec",
@@ -197,11 +197,8 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
		endpoint.GetResponse = &ForkChoiceHeadsResponseJson{}
	case "/eth/v2/debug/beacon/heads":
		endpoint.GetResponse = &V2ForkChoiceHeadsResponseJson{}
	case "/eth/v1/debug/forkchoice":
		endpoint.GetResponse = &ForkChoiceDumpJson{}
		endpoint.Hooks = apimiddleware.HookCollection{
			OnPreSerializeMiddlewareResponseIntoJson: prepareForkChoiceResponse,
		}
	case "/eth/v1/debug/beacon/forkchoice":
		endpoint.GetResponse = &ForkchoiceResponse{}
	case "/eth/v1/config/fork_schedule":
		endpoint.GetResponse = &ForkScheduleResponseJson{}
	case "/eth/v1/config/deposit_contract":

@@ -290,40 +290,16 @@ type SubmitContributionAndProofsRequestJson struct {
	Data []*SignedContributionAndProofJson `json:"data"`
}

type ForkChoiceNodeResponseJson struct {
	Slot string `json:"slot"`
	BlockRoot string `json:"block_root" hex:"true"`
	ParentRoot string `json:"parent_root" hex:"true"`
	JustifiedEpoch string `json:"justified_epoch"`
	FinalizedEpoch string `json:"finalized_epoch"`
	Weight string `json:"weight"`
	Validity string `json:"validity" enum:"true"`
	ExecutionBlockHash string `json:"execution_block_hash" hex:"true"`
	ExtraData *ForkChoiceNodeExtraDataJson `json:"extra_data"`
}

type ForkChoiceNodeExtraDataJson struct {
	UnrealizedJustifiedEpoch string `json:"unrealized_justified_epoch"`
	UnrealizedFinalizedEpoch string `json:"unrealized_finalized_epoch"`
	Balance string `json:"balance"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	TimeStamp string `json:"timestamp"`
}

type ForkChoiceResponseJson struct {
	JustifiedCheckpoint *CheckpointJson `json:"justified_checkpoint"`
	FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
	ForkChoiceNodes []*ForkChoiceNodeResponseJson `json:"fork_choice_nodes"`
	ExtraData *ForkChoiceResponseExtraDataJson `json:"extra_data"`
}

type ForkChoiceResponseExtraDataJson struct {
	BestJustifiedCheckpoint *CheckpointJson `json:"best_justified_checkpoint"`
	UnrealizedJustifiedCheckpoint *CheckpointJson `json:"unrealized_justified_checkpoint"`
	UnrealizedFinalizedCheckpoint *CheckpointJson `json:"unrealized_finalized_checkpoint"`
	ProposerBoostRoot string `json:"proposer_boost_root" hex:"true"`
	PreviousProposerBoostRoot string `json:"previous_proposer_boost_root" hex:"true"`
	HeadRoot string `json:"head_root" hex:"true"`
type ForkchoiceResponse struct {
	JustifiedCheckpoint *CheckpointJson `json:"justified_checkpoint"`
	FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
	BestJustifiedCheckpoint *CheckpointJson `json:"best_justified_checkpoint"`
	UnrealizedJustifiedCheckpoint *CheckpointJson `json:"unrealized_justified_checkpoint"`
	UnrealizedFinalizedCheckpoint *CheckpointJson `json:"unrealized_finalized_checkpoint"`
	ProposerBoostRoot string `json:"proposer_boost_root" hex:"true"`
	PreviousProposerBoostRoot string `json:"previous_proposer_boost_root" hex:"true"`
	HeadRoot string `json:"head_root" hex:"true"`
	ForkChoiceNodes []*ForkChoiceNodeJson `json:"forkchoice_nodes"`
}

type LivenessResponseJson struct {
@@ -381,7 +357,6 @@ type SignedBlindedBeaconBlockContainerJson struct {
	Phase0Block *BeaconBlockJson `json:"phase0_block"`
	AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
	BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
	CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
	Signature string `json:"signature" hex:"true"`
}

@@ -412,11 +387,6 @@ type SignedBlindedBeaconBlockBellatrixContainerJson struct {
	Signature string `json:"signature" hex:"true"`
}

type SignedBlindedBeaconBlockCapellaContainerJson struct {
	Message *BlindedBeaconBlockCapellaJson `json:"message"`
	Signature string `json:"signature" hex:"true"`
}

type BeaconBlockAltairJson struct {
	Slot string `json:"slot"`
	ProposerIndex string `json:"proposer_index"`
@@ -441,14 +411,6 @@ type BlindedBeaconBlockBellatrixJson struct {
	Body *BlindedBeaconBlockBodyBellatrixJson `json:"body"`
}

type BlindedBeaconBlockCapellaJson struct {
	Slot string `json:"slot"`
	ProposerIndex string `json:"proposer_index"`
	ParentRoot string `json:"parent_root" hex:"true"`
	StateRoot string `json:"state_root" hex:"true"`
	Body *BlindedBeaconBlockBodyCapellaJson `json:"body"`
}

type BeaconBlockBodyAltairJson struct {
	RandaoReveal string `json:"randao_reveal" hex:"true"`
	Eth1Data *Eth1DataJson `json:"eth1_data"`
@@ -487,20 +449,6 @@ type BlindedBeaconBlockBodyBellatrixJson struct {
	ExecutionPayloadHeader *ExecutionPayloadHeaderJson `json:"execution_payload_header"`
}

type BlindedBeaconBlockBodyCapellaJson struct {
	RandaoReveal string `json:"randao_reveal" hex:"true"`
	Eth1Data *Eth1DataJson `json:"eth1_data"`
	Graffiti string `json:"graffiti" hex:"true"`
	ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
	AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
	Attestations []*AttestationJson `json:"attestations"`
	Deposits []*DepositJson `json:"deposits"`
	VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
	SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
	ExecutionPayloadHeader *ExecutionPayloadHeaderCapellaJson `json:"execution_payload_header"`
	BLSToExecutionChanges []*BLSToExecutionChangeJson `json:"bls_to_execution_changes"`
}

type ExecutionPayloadJson struct {
	ParentHash string `json:"parent_hash" hex:"true"`
	FeeRecipient string `json:"fee_recipient" hex:"true"`
@@ -535,24 +483,6 @@ type ExecutionPayloadHeaderJson struct {
	TransactionsRoot string `json:"transactions_root" hex:"true"`
}

type ExecutionPayloadHeaderCapellaJson struct {
	ParentHash string `json:"parent_hash" hex:"true"`
	FeeRecipient string `json:"fee_recipient" hex:"true"`
	StateRoot string `json:"state_root" hex:"true"`
	ReceiptsRoot string `json:"receipts_root" hex:"true"`
	LogsBloom string `json:"logs_bloom" hex:"true"`
	PrevRandao string `json:"prev_randao" hex:"true"`
	BlockNumber string `json:"block_number"`
	GasLimit string `json:"gas_limit"`
	GasUsed string `json:"gas_used"`
	TimeStamp string `json:"timestamp"`
	ExtraData string `json:"extra_data" hex:"true"`
	BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
	BlockHash string `json:"block_hash" hex:"true"`
	TransactionsRoot string `json:"transactions_root" hex:"true"`
	WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
}

type SyncAggregateJson struct {
	SyncCommitteeBits string `json:"sync_committee_bits" hex:"true"`
	SyncCommitteeSignature string `json:"sync_committee_signature" hex:"true"`
@@ -623,12 +553,6 @@ type AttestationDataJson struct {
	Target *CheckpointJson `json:"target"`
}

type BLSToExecutionChangeJson struct {
	ValidatorIndex string `json:"validator_index"`
	FromBLSPubkey string `json:"from_bls_pubkey" hex:"true"`
	ToExecutionAddress string `json:"to_execution_address" hex:"true"`
}

type DepositJson struct {
	Proof []string `json:"proof" hex:"true"`
	Data *Deposit_DataJson `json:"data"`
@@ -762,41 +686,10 @@ type BeaconStateBellatrixJson struct {
	LatestExecutionPayloadHeader *ExecutionPayloadHeaderJson `json:"latest_execution_payload_header"`
}

type BeaconStateCapellaJson struct {
	GenesisTime string `json:"genesis_time"`
	GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
	Slot string `json:"slot"`
	Fork *ForkJson `json:"fork"`
	LatestBlockHeader *BeaconBlockHeaderJson `json:"latest_block_header"`
	BlockRoots []string `json:"block_roots" hex:"true"`
	StateRoots []string `json:"state_roots" hex:"true"`
	HistoricalRoots []string `json:"historical_roots" hex:"true"`
	Eth1Data *Eth1DataJson `json:"eth1_data"`
	Eth1DataVotes []*Eth1DataJson `json:"eth1_data_votes"`
	Eth1DepositIndex string `json:"eth1_deposit_index"`
	Validators []*ValidatorJson `json:"validators"`
	Balances []string `json:"balances"`
	RandaoMixes []string `json:"randao_mixes" hex:"true"`
	Slashings []string `json:"slashings"`
	PreviousEpochParticipation EpochParticipation `json:"previous_epoch_participation"`
	CurrentEpochParticipation EpochParticipation `json:"current_epoch_participation"`
	JustificationBits string `json:"justification_bits" hex:"true"`
	PreviousJustifiedCheckpoint *CheckpointJson `json:"previous_justified_checkpoint"`
	CurrentJustifiedCheckpoint *CheckpointJson `json:"current_justified_checkpoint"`
	FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
	InactivityScores []string `json:"inactivity_scores"`
	CurrentSyncCommittee *SyncCommitteeJson `json:"current_sync_committee"`
	NextSyncCommittee *SyncCommitteeJson `json:"next_sync_committee"`
	LatestExecutionPayloadHeader *ExecutionPayloadHeaderCapellaJson `json:"latest_execution_payload_header"`
	NextWithdrawalIndex string `json:"next_withdrawal_index"`
	NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
}

type BeaconStateContainerV2Json struct {
	Phase0State *BeaconStateJson `json:"phase0_state"`
	AltairState *BeaconStateAltairJson `json:"altair_state"`
	BellatrixState *BeaconStateBellatrixJson `json:"bellatrix_state"`
	CapellaState *BeaconStateCapellaJson `json:"capella_state"`
}

type ForkJson struct {
@@ -937,7 +830,7 @@ type SignedValidatorRegistrationsRequestJson struct {

type ForkChoiceNodeJson struct {
	Slot string `json:"slot"`
	BlockRoot string `json:"block_root" hex:"true"`
	Root string `json:"root" hex:"true"`
	ParentRoot string `json:"parent_root" hex:"true"`
	JustifiedEpoch string `json:"justified_epoch"`
	FinalizedEpoch string `json:"finalized_epoch"`
@@ -946,21 +839,8 @@ type ForkChoiceNodeJson struct {
	Balance string `json:"balance"`
	Weight string `json:"weight"`
	ExecutionOptimistic bool `json:"execution_optimistic"`
	ExecutionBlockHash string `json:"execution_block_hash" hex:"true"`
	ExecutionPayload string `json:"execution_payload" hex:"true"`
	TimeStamp string `json:"timestamp"`
	Validity string `json:"validity" enum:"true"`
}

type ForkChoiceDumpJson struct {
	JustifiedCheckpoint *CheckpointJson `json:"justified_checkpoint"`
	FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
	BestJustifiedCheckpoint *CheckpointJson `json:"best_justified_checkpoint"`
	UnrealizedJustifiedCheckpoint *CheckpointJson `json:"unrealized_justified_checkpoint"`
	UnrealizedFinalizedCheckpoint *CheckpointJson `json:"unrealized_finalized_checkpoint"`
	ProposerBoostRoot string `json:"proposer_boost_root" hex:"true"`
	PreviousProposerBoostRoot string `json:"previous_proposer_boost_root" hex:"true"`
	HeadRoot string `json:"head_root" hex:"true"`
	ForkChoiceNodes []*ForkChoiceNodeJson `json:"fork_choice_nodes"`
}

//----------------

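One detail worth noting in the structs above: the older ForkChoiceDumpJson, registered for "/eth/v1/debug/forkchoice", names its node list "fork_choice_nodes", while the new ForkchoiceResponse behind "/eth/v1/debug/beacon/forkchoice" uses "forkchoice_nodes". A minimal client sketch against the new path follows; the gateway address and the standalone program are assumptions for illustration, and the local type is an abridged mirror rather than the middleware struct itself.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Local, abridged mirror of ForkchoiceResponse; note the "forkchoice_nodes" tag.
type forkchoiceDump struct {
	HeadRoot        string `json:"head_root"`
	ForkchoiceNodes []struct {
		Slot     string `json:"slot"`
		Root     string `json:"root"`
		Validity string `json:"validity"`
	} `json:"forkchoice_nodes"`
}

func main() {
	// Assumed local gRPC-gateway address of a running beacon node.
	resp, err := http.Get("http://localhost:3500/eth/v1/debug/beacon/forkchoice")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	dump := &forkchoiceDump{}
	if err := json.NewDecoder(resp.Body).Decode(dump); err != nil {
		panic(err)
	}
	fmt.Printf("head %s with %d fork choice nodes\n", dump.HeadRoot, len(dump.ForkchoiceNodes))
}
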
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "blinded_blocks.go",
        "blocks.go",
        "config.go",
        "log.go",
@@ -70,7 +69,6 @@ go_library(
go_test(
    name = "go_default_test",
    srcs = [
        "blinded_blocks_test.go",
        "blocks_test.go",
        "config_test.go",
        "init_test.go",

@@ -1,246 +0,0 @@
package beacon

import (
	"context"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
	ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
	ethpbv2 "github.com/prysmaticlabs/prysm/v3/proto/eth/v2"
	"github.com/prysmaticlabs/prysm/v3/proto/migration"
	"go.opencensus.io/trace"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// GetBlindedBlock retrieves blinded block for given block id.
func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv2.BlindedBlockResponse, error) {
	ctx, span := trace.StartSpan(ctx, "beacon.GetBlindedBlock")
	defer span.End()

	blk, err := bs.blockFromBlockID(ctx, req.BlockId)
	err = handleGetBlockError(blk, err)
	if err != nil {
		return nil, err
	}

	result, err := getBlindedBlockPhase0(blk)
	if result != nil {
		return result, nil
	}
	// ErrUnsupportedGetter means that we have another block type
	if !errors.Is(err, blocks.ErrUnsupportedGetter) {
		return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
	}
	result, err = getBlindedBlockAltair(blk)
	if result != nil {
		return result, nil
	}
	// ErrUnsupportedGetter means that we have another block type
	if !errors.Is(err, blocks.ErrUnsupportedGetter) {
		return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
	}
	result, err = bs.getBlindedBlockBellatrix(ctx, blk)
	if result != nil {
		return result, nil
	}
	// ErrUnsupportedGetter means that we have another block type
	if !errors.Is(err, blocks.ErrUnsupportedGetter) {
		return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
	}
	result, err = bs.getBlindedBlockCapella(ctx, blk)
	if result != nil {
		return result, nil
	}
	// ErrUnsupportedGetter means that we have another block type
	if !errors.Is(err, blocks.ErrUnsupportedGetter) {
		return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
	}

	return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}

func getBlindedBlockPhase0(blk interfaces.SignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
	phase0Blk, err := blk.PbPhase0Block()
	if err != nil {
		return nil, err
	}
	if phase0Blk == nil {
		return nil, errNilBlock
	}
	v1Blk, err := migration.SignedBeaconBlock(blk)
	if err != nil {
		return nil, errors.Wrapf(err, "could not get signed beacon block")
	}
	return &ethpbv2.BlindedBlockResponse{
		Version: ethpbv2.Version_PHASE0,
		Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
			Message:   &ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block{Phase0Block: v1Blk.Block},
			Signature: v1Blk.Signature,
		},
		ExecutionOptimistic: false,
	}, nil
}

func getBlindedBlockAltair(blk interfaces.SignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
	altairBlk, err := blk.PbAltairBlock()
	if err != nil {
		return nil, err
	}
	if altairBlk == nil {
		return nil, errNilBlock
	}
	v2Blk, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlk.Block)
	if err != nil {
		return nil, errors.Wrapf(err, "could not get signed beacon block")
	}
	sig := blk.Signature()
	return &ethpbv2.BlindedBlockResponse{
		Version: ethpbv2.Version_ALTAIR,
		Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
			Message:   &ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock{AltairBlock: v2Blk},
			Signature: sig[:],
		},
		ExecutionOptimistic: false,
	}, nil
}

func (bs *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.SignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
	bellatrixBlk, err := blk.PbBellatrixBlock()
	if err != nil {
		// ErrUnsupportedGetter means that we have another block type
		if errors.Is(err, blocks.ErrUnsupportedGetter) {
			if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
				if blindedBellatrixBlk == nil {
					return nil, errNilBlock
				}
				v2Blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(blindedBellatrixBlk.Block)
				if err != nil {
					return nil, errors.Wrapf(err, "could not convert beacon block")
				}
				root, err := blk.Block().HashTreeRoot()
				if err != nil {
					return nil, errors.Wrapf(err, "could not get block root")
				}
				isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
				if err != nil {
					return nil, errors.Wrapf(err, "could not check if block is optimistic")
				}
				sig := blk.Signature()
				return &ethpbv2.BlindedBlockResponse{
					Version: ethpbv2.Version_BELLATRIX,
					Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
						Message:   &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: v2Blk},
						Signature: sig[:],
					},
					ExecutionOptimistic: isOptimistic,
				}, nil
			}
			return nil, err
		}
		return nil, err
	}

	if bellatrixBlk == nil {
		return nil, errNilBlock
	}
	blindedBlkInterface, err := blk.ToBlinded()
	if err != nil {
		return nil, errors.Wrapf(err, "could not convert block to blinded block")
	}
	blindedBellatrixBlock, err := blindedBlkInterface.PbBlindedBellatrixBlock()
	if err != nil {
		return nil, errors.Wrapf(err, "could not get signed beacon block")
	}
	v2Blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(blindedBellatrixBlock.Block)
	if err != nil {
		return nil, errors.Wrapf(err, "could not convert beacon block")
	}
	root, err := blk.Block().HashTreeRoot()
	if err != nil {
		return nil, errors.Wrapf(err, "could not get block root")
	}
	isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
	if err != nil {
		return nil, errors.Wrapf(err, "could not check if block is optimistic")
	}
	sig := blk.Signature()
	return &ethpbv2.BlindedBlockResponse{
		Version: ethpbv2.Version_BELLATRIX,
		Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
			Message:   &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: v2Blk},
			Signature: sig[:],
		},
		ExecutionOptimistic: isOptimistic,
	}, nil
}

func (bs *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.SignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
	capellaBlk, err := blk.PbCapellaBlock()
	if err != nil {
		// ErrUnsupportedGetter means that we have another block type
		if errors.Is(err, blocks.ErrUnsupportedGetter) {
			if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
				if blindedCapellaBlk == nil {
					return nil, errNilBlock
				}
				v2Blk, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(blindedCapellaBlk.Block)
				if err != nil {
					return nil, errors.Wrapf(err, "Could not convert beacon block")
				}
				root, err := blk.Block().HashTreeRoot()
				if err != nil {
					return nil, errors.Wrapf(err, "could not get block root")
				}
				isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
				if err != nil {
					return nil, errors.Wrapf(err, "could not check if block is optimistic")
				}
				sig := blk.Signature()
				return &ethpbv2.BlindedBlockResponse{
					Version: ethpbv2.Version_CAPELLA,
					Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
						Message:   &ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock{CapellaBlock: v2Blk},
						Signature: sig[:],
					},
					ExecutionOptimistic: isOptimistic,
				}, nil
			}
			return nil, err
		}
	}

	if capellaBlk == nil {
		return nil, errNilBlock
	}
	blindedBlkInterface, err := blk.ToBlinded()
	if err != nil {
		return nil, errors.Wrapf(err, "could not convert block to blinded block")
	}
	blindedCapellaBlock, err := blindedBlkInterface.PbBlindedCapellaBlock()
	if err != nil {
		return nil, errors.Wrapf(err, "could not get signed beacon block")
	}
	v2Blk, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(blindedCapellaBlock.Block)
	if err != nil {
		return nil, errors.Wrapf(err, "could not convert beacon block")
	}
	root, err := blk.Block().HashTreeRoot()
	if err != nil {
		return nil, errors.Wrapf(err, "could not get block root")
	}
	isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
	if err != nil {
		return nil, errors.Wrapf(err, "could not check if block is optimistic")
	}
	sig := blk.Signature()
	return &ethpbv2.BlindedBlockResponse{
		Version: ethpbv2.Version_CAPELLA,
		Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
			Message:   &ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock{CapellaBlock: v2Blk},
			Signature: sig[:],
		},
		ExecutionOptimistic: isOptimistic,
	}, nil
}
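For orientation, a hedged sketch of how the handler above is exercised from server-side Go code, mirroring what the package tests do. It assumes an already-constructed *Server from this package plus the ethpbv1/ethpbv2 imports shown above, and the helper name is hypothetical; it is not a standalone program.

// Sketch only: bs is a configured *Server, as built in the tests.
func printBlindedHead(ctx context.Context, bs *Server) error {
	resp, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{BlockId: []byte("head")})
	if err != nil {
		return err
	}
	switch resp.Version {
	case ethpbv2.Version_BELLATRIX:
		// Post-merge forks come back as blinded containers carrying a payload header only.
		blk := resp.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock)
		fmt.Println("blinded bellatrix slot:", blk.BellatrixBlock.Slot)
	default:
		fmt.Println("version:", resp.Version, "optimistic:", resp.ExecutionOptimistic)
	}
	return nil
}
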
@@ -1,594 +0,0 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
|
||||
dbTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
|
||||
executionTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v3/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v3/proto/migration"
|
||||
ethpbalpha "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/util"
|
||||
)
|
||||
|
||||
func TestServer_GetBlindedBlock(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
|
||||
canonicalRoots := make(map[[32]byte]bool)
|
||||
|
||||
for _, bContr := range blkContainers {
|
||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
||||
}
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
nextSlot := headBlock.GetPhase0Block().Block.Slot + 1
|
||||
|
||||
b2 := util.NewBeaconBlock()
|
||||
b2.Block.Slot = 30
|
||||
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b2)
|
||||
b3 := util.NewBeaconBlock()
|
||||
b3.Block.Slot = 30
|
||||
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b3)
|
||||
b4 := util.NewBeaconBlock()
|
||||
b4.Block.Slot = nextSlot
|
||||
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b4)
|
||||
|
||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blockID []byte
|
||||
want *ethpbalpha.SignedBeaconBlock
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: []byte("3bad0"),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "canonical",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "non canonical",
|
||||
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "head",
|
||||
blockID: []byte("head"),
|
||||
want: headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "finalized",
|
||||
blockID: []byte("finalized"),
|
||||
want: blkContainers[64].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
blockID: []byte("genesis"),
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "genesis root",
|
||||
blockID: root[:],
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "root",
|
||||
blockID: blkContainers[20].BlockRoot,
|
||||
want: blkContainers[20].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: bytesutil.PadTo([]byte("hi there"), 32),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no block",
|
||||
blockID: []byte("105"),
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blk, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: tt.blockID,
|
||||
})
|
||||
if tt.wantErr {
|
||||
require.NotEqual(t, err, nil)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
v1Block, err := migration.V1Alpha1ToV1SignedBlock(tt.want)
|
||||
require.NoError(t, err)
|
||||
|
||||
phase0Block, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block)
|
||||
require.Equal(t, true, ok)
|
||||
if !reflect.DeepEqual(phase0Block.Phase0Block, v1Block.Block) {
|
||||
t.Error("Expected blocks to equal")
|
||||
}
|
||||
assert.Equal(t, ethpbv2.Version_PHASE0, blk.Version)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
|
||||
canonicalRoots := make(map[[32]byte]bool)
|
||||
|
||||
for _, bContr := range blkContainers {
|
||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
||||
}
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
nextSlot := headBlock.GetAltairBlock().Block.Slot + 1
|
||||
|
||||
b2 := util.NewBeaconBlockAltair()
|
||||
b2.Block.Slot = 30
|
||||
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b2)
|
||||
b3 := util.NewBeaconBlockAltair()
|
||||
b3.Block.Slot = 30
|
||||
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b3)
|
||||
b4 := util.NewBeaconBlockAltair()
|
||||
b4.Block.Slot = nextSlot
|
||||
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b4)
|
||||
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetAltairBlock())
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blockID []byte
|
||||
want *ethpbalpha.SignedBeaconBlockAltair
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].GetAltairBlock(),
|
||||
},
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: []byte("3bad0"),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "canonical",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].GetAltairBlock(),
|
||||
},
|
||||
{
|
||||
name: "non canonical",
|
||||
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "head",
|
||||
blockID: []byte("head"),
|
||||
want: headBlock.GetAltairBlock(),
|
||||
},
|
||||
{
|
||||
name: "finalized",
|
||||
blockID: []byte("finalized"),
|
||||
want: blkContainers[64].GetAltairBlock(),
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
blockID: []byte("genesis"),
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "genesis root",
|
||||
blockID: root[:],
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "root",
|
||||
blockID: blkContainers[20].BlockRoot,
|
||||
want: blkContainers[20].GetAltairBlock(),
|
||||
},
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: bytesutil.PadTo([]byte("hi there"), 32),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no block",
|
||||
blockID: []byte("105"),
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blk, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: tt.blockID,
|
||||
})
|
||||
if tt.wantErr {
|
||||
require.NotEqual(t, err, nil)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
v2Block, err := migration.V1Alpha1BeaconBlockAltairToV2(tt.want.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
altairBlock, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock)
|
||||
require.Equal(t, true, ok)
|
||||
if !reflect.DeepEqual(altairBlock.AltairBlock, v2Block) {
|
||||
t.Error("Expected blocks to equal")
|
||||
}
|
||||
assert.Equal(t, ethpbv2.Version_ALTAIR, blk.Version)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocksBellatrixBlinded(ctx, t, beaconDB)
|
||||
canonicalRoots := make(map[[32]byte]bool)
|
||||
|
||||
for _, bContr := range blkContainers {
|
||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
||||
}
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
nextSlot := headBlock.GetBlindedBellatrixBlock().Block.Slot + 1
|
||||
|
||||
b2 := util.NewBlindedBeaconBlockBellatrix()
|
||||
b2.Block.Slot = 30
|
||||
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b2)
|
||||
b3 := util.NewBlindedBeaconBlockBellatrix()
|
||||
b3.Block.Slot = 30
|
||||
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b3)
|
||||
b4 := util.NewBlindedBeaconBlockBellatrix()
|
||||
b4.Block.Slot = nextSlot
|
||||
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b4)
|
||||
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedBellatrixBlock())
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
ExecutionPayloadReconstructor: &executionTest.EngineClient{
|
||||
ExecutionPayloadByBlockHash: map[[32]byte]*enginev1.ExecutionPayload{},
|
||||
},
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blockID []byte
|
||||
want *ethpbalpha.SignedBlindedBeaconBlockBellatrix
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].GetBlindedBellatrixBlock(),
|
||||
},
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: []byte("3bad0"),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "canonical",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].GetBlindedBellatrixBlock(),
|
||||
},
|
||||
{
|
||||
name: "non canonical",
|
||||
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "head",
|
||||
blockID: []byte("head"),
|
||||
want: headBlock.GetBlindedBellatrixBlock(),
|
||||
},
|
||||
{
|
||||
name: "finalized",
|
||||
blockID: []byte("finalized"),
|
||||
want: blkContainers[64].GetBlindedBellatrixBlock(),
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
blockID: []byte("genesis"),
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "genesis root",
|
||||
blockID: root[:],
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "root",
|
||||
blockID: blkContainers[20].BlockRoot,
|
||||
want: blkContainers[20].GetBlindedBellatrixBlock(),
|
||||
},
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: bytesutil.PadTo([]byte("hi there"), 32),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no block",
|
||||
blockID: []byte("105"),
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blk, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: tt.blockID,
|
||||
})
|
||||
if tt.wantErr {
|
||||
require.NotEqual(t, err, nil)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
v2Block, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(tt.want.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
b, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock)
|
||||
require.Equal(t, true, ok)
|
||||
if !reflect.DeepEqual(b.BellatrixBlock, v2Block) {
|
||||
t.Error("Expected blocks to equal")
|
||||
}
|
||||
assert.Equal(t, ethpbv2.Version_BELLATRIX, blk.Version)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocksCapellaBlinded(ctx, t, beaconDB)
|
||||
canonicalRoots := make(map[[32]byte]bool)
|
||||
|
||||
for _, bContr := range blkContainers {
|
||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
||||
}
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
nextSlot := headBlock.GetBlindedCapellaBlock().Block.Slot + 1
|
||||
|
||||
b2 := util.NewBlindedBeaconBlockCapella()
|
||||
b2.Block.Slot = 30
|
||||
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b2)
|
||||
b3 := util.NewBlindedBeaconBlockCapella()
|
||||
b3.Block.Slot = 30
|
||||
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b3)
|
||||
b4 := util.NewBlindedBeaconBlockCapella()
|
||||
b4.Block.Slot = nextSlot
|
||||
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b4)
|
||||
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedCapellaBlock())
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
ExecutionPayloadReconstructor: &executionTest.EngineClient{
|
||||
ExecutionPayloadByBlockHash: map[[32]byte]*enginev1.ExecutionPayload{},
|
||||
},
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blockID []byte
|
||||
want *ethpbalpha.SignedBlindedBeaconBlockCapella
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].GetBlindedCapellaBlock(),
|
||||
},
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: []byte("3bad0"),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "canonical",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].GetBlindedCapellaBlock(),
|
||||
},
|
||||
{
|
||||
name: "non canonical",
|
||||
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "head",
|
||||
blockID: []byte("head"),
|
||||
want: headBlock.GetBlindedCapellaBlock(),
|
||||
},
|
||||
{
|
||||
name: "finalized",
|
||||
blockID: []byte("finalized"),
|
||||
want: blkContainers[64].GetBlindedCapellaBlock(),
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
blockID: []byte("genesis"),
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "genesis root",
|
||||
blockID: root[:],
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "root",
|
||||
blockID: blkContainers[20].BlockRoot,
|
||||
want: blkContainers[20].GetBlindedCapellaBlock(),
|
||||
},
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: bytesutil.PadTo([]byte("hi there"), 32),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no block",
|
||||
blockID: []byte("105"),
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blk, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: tt.blockID,
|
||||
})
|
||||
if tt.wantErr {
|
||||
require.NotEqual(t, err, nil)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
v2Block, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(tt.want.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
b, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock)
|
||||
require.Equal(t, true, ok)
|
||||
if !reflect.DeepEqual(b.CapellaBlock, v2Block) {
|
||||
t.Error("Expected blocks to equal")
|
||||
}
|
||||
assert.Equal(t, ethpbv2.Version_CAPELLA, blk.Version)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("execution optimistic", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
|
||||
b2 := util.NewBeaconBlockBellatrix()
|
||||
b2.Block.Slot = 30
|
||||
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b2)
|
||||
b3 := util.NewBeaconBlockBellatrix()
|
||||
b3.Block.Slot = 30
|
||||
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b3)
|
||||
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBellatrixBlock())
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
Optimistic: true,
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
blk, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: []byte("head"),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, blk.ExecutionOptimistic)
|
||||
})
|
||||
}
|
||||
@@ -34,10 +34,6 @@ import (

const versionHeader = "eth-consensus-version"

var (
	errNilBlock = errors.New("nil block")
)

// blockIdParseError represents an error scenario where a block ID could not be parsed.
type blockIdParseError struct {
	message string
@@ -688,6 +684,138 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
	return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}

// GetBlindedBlock retrieves blinded block for given block id.
func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv2.BlindedBlockResponse, error) {
	ctx, span := trace.StartSpan(ctx, "beacon.GetBlindedBlock")
	defer span.End()

	blk, err := bs.blockFromBlockID(ctx, req.BlockId)
	err = handleGetBlockError(blk, err)
	if err != nil {
		return nil, err
	}

	phase0Blk, err := blk.PbPhase0Block()
	if err == nil {
		if phase0Blk == nil {
			return nil, status.Error(codes.Internal, "Nil block")
		}
		v1Blk, err := migration.SignedBeaconBlock(blk)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
		}
		return &ethpbv2.BlindedBlockResponse{
			Version: ethpbv2.Version_PHASE0,
			Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
				Message:   &ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block{Phase0Block: v1Blk.Block},
				Signature: v1Blk.Signature,
			},
			ExecutionOptimistic: false,
		}, nil
	}
	// ErrUnsupportedGetter means that we have another block type
	if !errors.Is(err, blocks.ErrUnsupportedGetter) {
		return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
	}

	altairBlk, err := blk.PbAltairBlock()
	if err == nil {
		if altairBlk == nil {
			return nil, status.Error(codes.Internal, "Nil block")
		}
		v2Blk, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlk.Block)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
		}
		sig := blk.Signature()
		return &ethpbv2.BlindedBlockResponse{
			Version: ethpbv2.Version_ALTAIR,
			Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
				Message:   &ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock{AltairBlock: v2Blk},
				Signature: sig[:],
			},
			ExecutionOptimistic: false,
		}, nil
	}
	// ErrUnsupportedGetter means that we have another block type
	if !errors.Is(err, blocks.ErrUnsupportedGetter) {
		return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
	}

	bellatrixBlk, err := blk.PbBellatrixBlock()
	if err == nil {
		if bellatrixBlk == nil {
			return nil, status.Error(codes.Internal, "Nil block")
		}
		blindedBlkInterface, err := blk.ToBlinded()
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not convert block to blinded block: %v", err)
		}
		blindedBellatrixBlock, err := blindedBlkInterface.PbBlindedBellatrixBlock()
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
		}
		v2Blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(blindedBellatrixBlock.Block)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
		}
		root, err := blk.Block().HashTreeRoot()
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
		}
		isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
		}
		sig := blk.Signature()
		return &ethpbv2.BlindedBlockResponse{
			Version: ethpbv2.Version_BELLATRIX,
			Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
				Message:   &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: v2Blk},
				Signature: sig[:],
			},
			ExecutionOptimistic: isOptimistic,
		}, nil
	}
	// ErrUnsupportedGetter means that we have another block type
	if !errors.Is(err, blocks.ErrUnsupportedGetter) {
		return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
	}

	if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
		if blindedBellatrixBlk == nil {
			return nil, status.Error(codes.Internal, "Nil block")
		}
		v2Blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(blindedBellatrixBlk.Block)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
		}
		root, err := blk.Block().HashTreeRoot()
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
		}
		isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
		}
		sig := blk.Signature()
		return &ethpbv2.BlindedBlockResponse{
			Version: ethpbv2.Version_BELLATRIX,
			Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
				Message:   &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: v2Blk},
				Signature: sig[:],
			},
			ExecutionOptimistic: isOptimistic,
		}, nil
	}
	// ErrUnsupportedGetter means that we have another block type
	if !errors.Is(err, blocks.ErrUnsupportedGetter) {
		return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
	}

	return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}

// GetBlindedBlockSSZ returns the SSZ-serialized version of the blinded beacon block for given block id.
func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv2.SSZContainer, error) {
	ctx, span := trace.StartSpan(ctx, "beacon.GetBlindedBlockSSZ")

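The handler added above leans on one convention: each Pb*Block getter either returns the block for its fork or blocks.ErrUnsupportedGetter, which acts as a "not this fork, try the next one" sentinel; any other error is fatal. Below is a small, self-contained sketch of that control flow with stand-in getters; the names and the local sentinel are hypothetical, only the fallthrough pattern mirrors the handler.

package main

import (
	"errors"
	"fmt"
)

// Stand-in for blocks.ErrUnsupportedGetter, for illustration only.
var errUnsupportedGetter = errors.New("unsupported getter")

// Each getter either succeeds, reports "wrong fork", or fails hard.
type getter struct {
	name string
	get  func() (string, error)
}

func resolve(getters []getter) (string, error) {
	for _, g := range getters {
		v, err := g.get()
		if err == nil {
			return g.name + ": " + v, nil
		}
		// The sentinel means "try the next fork"; anything else is a real failure.
		if !errors.Is(err, errUnsupportedGetter) {
			return "", fmt.Errorf("could not get block from %s: %w", g.name, err)
		}
	}
	return "", errors.New("unknown block type")
}

func main() {
	out, err := resolve([]getter{
		{name: "phase0", get: func() (string, error) { return "", errUnsupportedGetter }},
		{name: "altair", get: func() (string, error) { return "block", nil }},
	})
	fmt.Println(out, err)
}
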
@@ -200,48 +200,6 @@ func fillDBTestBlocksBellatrixBlinded(ctx context.Context, t *testing.T, beaconD
	return genBlk, blkContainers
}

func fillDBTestBlocksCapellaBlinded(ctx context.Context, t *testing.T, beaconDB db.Database) (*ethpbalpha.SignedBlindedBeaconBlockCapella, []*ethpbalpha.BeaconBlockContainer) {
	parentRoot := [32]byte{1, 2, 3}
	genBlk := util.NewBlindedBeaconBlockCapella()
	genBlk.Block.ParentRoot = parentRoot[:]
	root, err := genBlk.Block.HashTreeRoot()
	require.NoError(t, err)
	util.SaveBlock(t, ctx, beaconDB, genBlk)
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))

	count := types.Slot(100)
	blks := make([]interfaces.SignedBeaconBlock, count)
	blkContainers := make([]*ethpbalpha.BeaconBlockContainer, count)
	for i := types.Slot(0); i < count; i++ {
		b := util.NewBlindedBeaconBlockCapella()
		b.Block.Slot = i
		b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
		att1 := util.NewAttestation()
		att1.Data.Slot = i
		att1.Data.CommitteeIndex = types.CommitteeIndex(i)
		att2 := util.NewAttestation()
		att2.Data.Slot = i
		att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
		b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
		root, err := b.Block.HashTreeRoot()
		require.NoError(t, err)
		signedB, err := blocks.NewSignedBeaconBlock(b)
		require.NoError(t, err)
		blks[i] = signedB
		blkContainers[i] = &ethpbalpha.BeaconBlockContainer{
			Block: &ethpbalpha.BeaconBlockContainer_BlindedCapellaBlock{BlindedCapellaBlock: b}, BlockRoot: root[:]}
	}
	require.NoError(t, beaconDB.SaveBlocks(ctx, blks))
	headRoot := bytesutil.ToBytes32(blkContainers[len(blks)-1].BlockRoot)
	summary := &ethpbalpha.StateSummary{
		Root: headRoot[:],
		Slot: blkContainers[len(blks)-1].Block.(*ethpbalpha.BeaconBlockContainer_BlindedCapellaBlock).BlindedCapellaBlock.Block.Slot,
	}
	require.NoError(t, beaconDB.SaveStateSummary(ctx, summary))
	require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
	return genBlk, blkContainers
}

func TestServer_GetBlockHeader(t *testing.T) {
	beaconDB := dbTest.SetupDB(t)
	ctx := context.Background()
@@ -1584,6 +1542,444 @@ func TestServer_GetBlockV2(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_GetBlindedBlock(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
|
||||
canonicalRoots := make(map[[32]byte]bool)
|
||||
|
||||
for _, bContr := range blkContainers {
|
||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
||||
}
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
nextSlot := headBlock.GetPhase0Block().Block.Slot + 1
|
||||
|
||||
b2 := util.NewBeaconBlock()
|
||||
b2.Block.Slot = 30
|
||||
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b2)
|
||||
b3 := util.NewBeaconBlock()
|
||||
b3.Block.Slot = 30
|
||||
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b3)
|
||||
b4 := util.NewBeaconBlock()
|
||||
b4.Block.Slot = nextSlot
|
||||
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b4)
|
||||
|
||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blockID []byte
|
||||
want *ethpbalpha.SignedBeaconBlock
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: []byte("3bad0"),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "canonical",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "non canonical",
|
||||
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "head",
|
||||
blockID: []byte("head"),
|
||||
want: headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "finalized",
|
||||
blockID: []byte("finalized"),
|
||||
want: blkContainers[64].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
blockID: []byte("genesis"),
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "genesis root",
|
||||
blockID: root[:],
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "root",
|
||||
blockID: blkContainers[20].BlockRoot,
|
||||
want: blkContainers[20].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: bytesutil.PadTo([]byte("hi there"), 32),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no block",
|
||||
blockID: []byte("105"),
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blk, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: tt.blockID,
|
||||
})
|
||||
if tt.wantErr {
|
||||
require.NotEqual(t, err, nil)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
v1Block, err := migration.V1Alpha1ToV1SignedBlock(tt.want)
|
||||
require.NoError(t, err)
|
||||
|
||||
phase0Block, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block)
|
||||
require.Equal(t, true, ok)
|
||||
if !reflect.DeepEqual(phase0Block.Phase0Block, v1Block.Block) {
|
||||
t.Error("Expected blocks to equal")
|
||||
}
|
||||
assert.Equal(t, ethpbv2.Version_PHASE0, blk.Version)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
|
||||
canonicalRoots := make(map[[32]byte]bool)
|
||||
|
||||
for _, bContr := range blkContainers {
|
||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
||||
}
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
nextSlot := headBlock.GetAltairBlock().Block.Slot + 1
|
||||
|
||||
b2 := util.NewBeaconBlockAltair()
|
||||
b2.Block.Slot = 30
|
||||
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b2)
|
||||
b3 := util.NewBeaconBlockAltair()
|
||||
b3.Block.Slot = 30
|
||||
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b3)
|
||||
b4 := util.NewBeaconBlockAltair()
|
||||
b4.Block.Slot = nextSlot
|
||||
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b4)
|
||||
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetAltairBlock())
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
		tests := []struct {
			name    string
			blockID []byte
			want    *ethpbalpha.SignedBeaconBlockAltair
			wantErr bool
		}{
			{
				name:    "slot",
				blockID: []byte("30"),
				want:    blkContainers[30].GetAltairBlock(),
			},
			{
				name:    "bad formatting",
				blockID: []byte("3bad0"),
				wantErr: true,
			},
			{
				name:    "canonical",
				blockID: []byte("30"),
				want:    blkContainers[30].GetAltairBlock(),
			},
			{
				name:    "non canonical",
				blockID: []byte(fmt.Sprintf("%d", nextSlot)),
				wantErr: true,
			},
			{
				name:    "head",
				blockID: []byte("head"),
				want:    headBlock.GetAltairBlock(),
			},
			{
				name:    "finalized",
				blockID: []byte("finalized"),
				want:    blkContainers[64].GetAltairBlock(),
			},
			{
				name:    "genesis",
				blockID: []byte("genesis"),
				want:    genBlk,
			},
			{
				name:    "genesis root",
				blockID: root[:],
				want:    genBlk,
			},
			{
				name:    "root",
				blockID: blkContainers[20].BlockRoot,
				want:    blkContainers[20].GetAltairBlock(),
			},
			{
				name:    "non-existent root",
				blockID: bytesutil.PadTo([]byte("hi there"), 32),
				wantErr: true,
			},
			{
				name:    "no block",
				blockID: []byte("105"),
				wantErr: true,
			},
		}
		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
					BlockId: tt.blockID,
				})
				if tt.wantErr {
					require.NotEqual(t, err, nil)
					return
				}
				require.NoError(t, err)

				v2Block, err := migration.V1Alpha1BeaconBlockAltairToV2(tt.want.Block)
				require.NoError(t, err)

				altairBlock, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock)
				require.Equal(t, true, ok)
				if !reflect.DeepEqual(altairBlock.AltairBlock, v2Block) {
					t.Error("Expected blocks to equal")
				}
				assert.Equal(t, ethpbv2.Version_ALTAIR, blk.Version)
			})
		}
	})

	t.Run("Bellatrix", func(t *testing.T) {
		beaconDB := dbTest.SetupDB(t)
		ctx := context.Background()

		genBlk, blkContainers := fillDBTestBlocksBellatrixBlinded(ctx, t, beaconDB)
		canonicalRoots := make(map[[32]byte]bool)

		for _, bContr := range blkContainers {
			canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
		}
		headBlock := blkContainers[len(blkContainers)-1]
		nextSlot := headBlock.GetBlindedBellatrixBlock().Block.Slot + 1

		b2 := util.NewBlindedBeaconBlockBellatrix()
		b2.Block.Slot = 30
		b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
		util.SaveBlock(t, ctx, beaconDB, b2)
		b3 := util.NewBlindedBeaconBlockBellatrix()
		b3.Block.Slot = 30
		b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
		util.SaveBlock(t, ctx, beaconDB, b3)
		b4 := util.NewBlindedBeaconBlockBellatrix()
		b4.Block.Slot = nextSlot
		b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
		util.SaveBlock(t, ctx, beaconDB, b4)

		chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedBellatrixBlock())
		require.NoError(t, err)
		mockChainService := &mock.ChainService{
			DB:                  beaconDB,
			Block:               chainBlk,
			Root:                headBlock.BlockRoot,
			FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
			CanonicalRoots:      canonicalRoots,
		}
		bs := &Server{
			BeaconDB:              beaconDB,
			ChainInfoFetcher:      mockChainService,
			HeadFetcher:           mockChainService,
			OptimisticModeFetcher: mockChainService,
			ExecutionPayloadReconstructor: &executionTest.EngineClient{
				ExecutionPayloadByBlockHash: map[[32]byte]*enginev1.ExecutionPayload{},
			},
		}

		root, err := genBlk.Block.HashTreeRoot()
		require.NoError(t, err)

		tests := []struct {
			name    string
			blockID []byte
			want    *ethpbalpha.SignedBlindedBeaconBlockBellatrix
			wantErr bool
		}{
			{
				name:    "slot",
				blockID: []byte("30"),
				want:    blkContainers[30].GetBlindedBellatrixBlock(),
			},
			{
				name:    "bad formatting",
				blockID: []byte("3bad0"),
				wantErr: true,
			},
			{
				name:    "canonical",
				blockID: []byte("30"),
				want:    blkContainers[30].GetBlindedBellatrixBlock(),
			},
			{
				name:    "non canonical",
				blockID: []byte(fmt.Sprintf("%d", nextSlot)),
				wantErr: true,
			},
			{
				name:    "head",
				blockID: []byte("head"),
				want:    headBlock.GetBlindedBellatrixBlock(),
			},
			{
				name:    "finalized",
				blockID: []byte("finalized"),
				want:    blkContainers[64].GetBlindedBellatrixBlock(),
			},
			{
				name:    "genesis",
				blockID: []byte("genesis"),
				want:    genBlk,
			},
			{
				name:    "genesis root",
				blockID: root[:],
				want:    genBlk,
			},
			{
				name:    "root",
				blockID: blkContainers[20].BlockRoot,
				want:    blkContainers[20].GetBlindedBellatrixBlock(),
			},
			{
				name:    "non-existent root",
				blockID: bytesutil.PadTo([]byte("hi there"), 32),
				wantErr: true,
			},
			{
				name:    "no block",
				blockID: []byte("105"),
				wantErr: true,
			},
		}
		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
					BlockId: tt.blockID,
				})
				if tt.wantErr {
					require.NotEqual(t, err, nil)
					return
				}
				require.NoError(t, err)

				v2Block, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(tt.want.Block)
				require.NoError(t, err)

				b, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock)
				require.Equal(t, true, ok)
				if !reflect.DeepEqual(b.BellatrixBlock, v2Block) {
					t.Error("Expected blocks to equal")
				}
				assert.Equal(t, ethpbv2.Version_BELLATRIX, blk.Version)
			})
		}
	})

	t.Run("execution optimistic", func(t *testing.T) {
		beaconDB := dbTest.SetupDB(t)
		ctx := context.Background()

		_, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
		headBlock := blkContainers[len(blkContainers)-1]

		b2 := util.NewBeaconBlockBellatrix()
		b2.Block.Slot = 30
		b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
		util.SaveBlock(t, ctx, beaconDB, b2)
		b3 := util.NewBeaconBlockBellatrix()
		b3.Block.Slot = 30
		b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
		util.SaveBlock(t, ctx, beaconDB, b3)

		chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBellatrixBlock())
		require.NoError(t, err)
		mockChainService := &mock.ChainService{
			DB:                  beaconDB,
			Block:               chainBlk,
			Root:                headBlock.BlockRoot,
			FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
			Optimistic:          true,
		}
		bs := &Server{
			BeaconDB:              beaconDB,
			ChainInfoFetcher:      mockChainService,
			HeadFetcher:           mockChainService,
			OptimisticModeFetcher: mockChainService,
		}

		blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
			BlockId: []byte("head"),
		})
		require.NoError(t, err)
		assert.Equal(t, true, blk.ExecutionOptimistic)
	})
}
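
The Phase0, Altair, and Bellatrix subtests above all drive bs.GetBlindedBlock through the same set of block identifiers: the named checkpoints "head", "genesis", and "finalized", a decimal slot string such as "30", a 32-byte block root, and malformed input that must be rejected. The classifier below is only a hedged sketch of that contract for readers of the diff; parseBlockID and blockIDKind are illustrative names, not Prysm identifiers, and the server's real resolution logic is not reproduced here.

package main

import (
	"fmt"
	"strconv"
)

// blockIDKind is a hypothetical label for the block-ID forms the test table exercises.
type blockIDKind int

const (
	byNamed blockIDKind = iota // "head", "genesis", "finalized"
	bySlot                     // decimal slot string, e.g. "30"
	byRoot                     // 32-byte block root
	invalid                    // anything else, e.g. "3bad0"
)

// parseBlockID mirrors the inputs covered by the test cases above; it is a sketch
// of the accepted shapes, not the lookup the server actually performs.
func parseBlockID(id []byte) (blockIDKind, error) {
	switch string(id) {
	case "head", "genesis", "finalized":
		return byNamed, nil
	}
	if len(id) == 32 {
		return byRoot, nil
	}
	if _, err := strconv.ParseUint(string(id), 10, 64); err == nil {
		return bySlot, nil
	}
	return invalid, fmt.Errorf("could not parse block ID %q", id)
}

func main() {
	for _, id := range [][]byte{[]byte("head"), []byte("30"), []byte("3bad0"), make([]byte, 32)} {
		kind, err := parseBlockID(id)
		fmt.Println(kind, err)
	}
}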

func TestServer_GetBlockSSZ(t *testing.T) {
	beaconDB := dbTest.SetupDB(t)
	ctx := context.Background()

@@ -492,9 +492,8 @@ func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
		},
	}

	newBs, err := bs.Copy()
	require.NoError(t, err)
	newBs, err = transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
	newBs := bs.Copy()
	newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
	require.NoError(t, err)

	for _, att := range []*ethpbv1.IndexedAttestation{slashing.Attestation_1, slashing.Attestation_2} {
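
This hunk, and the proposer-slashing and voluntary-exit hunks that follow, all record one mechanical change: on the compared branch the beacon state's Copy appears to no longer return an error, so the copy-then-check pair collapses into a single assignment and only ProcessSlots is left to fail. The toy types below sketch the updated call-site shape under that assumption; toyState and processSlots are stand-ins, not Prysm's state or transition packages.

package main

import "fmt"

// toyState stands in for the beacon state type; only the shape of the call
// site is being illustrated.
type toyState struct {
	slot uint64
}

// Copy returns a value copy and no error, matching the updated call sites
// in these hunks (newBs := bs.Copy()).
func (s *toyState) Copy() *toyState {
	c := *s
	return &c
}

// processSlots stands in for transition.ProcessSlots: it advances the copy
// and can still fail, so the remaining error check stays with it.
func processSlots(s *toyState, target uint64) (*toyState, error) {
	if target < s.slot {
		return nil, fmt.Errorf("target slot %d is in the past", target)
	}
	s.slot = target
	return s, nil
}

func main() {
	bs := &toyState{slot: 10}
	newBs := bs.Copy()                    // no error to check on Copy anymore
	newBs, err := processSlots(newBs, 32) // error handling moves to the transition call
	fmt.Println(newBs.slot, err, bs.slot) // 32 <nil> 10
}
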
@@ -660,9 +659,8 @@ func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
		},
	}

	newBs, err := bs.Copy()
	require.NoError(t, err)
	newBs, err = transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
	newBs := bs.Copy()
	newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
	require.NoError(t, err)

	for _, h := range []*ethpbv1.SignedBeaconBlockHeader{slashing.SignedHeader_1, slashing.SignedHeader_2} {
@@ -789,9 +787,8 @@ func TestSubmitVoluntaryExit_AcrossFork(t *testing.T) {
		Signature: make([]byte, 96),
	}

	newBs, err := bs.Copy()
	require.NoError(t, err)
	newBs, err = transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)+1))
	newBs := bs.Copy()
	newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)+1))
	require.NoError(t, err)

	sb, err := signing.ComputeDomainAndSign(newBs, exit.Message.Epoch, exit.Message, params.BeaconConfig().DomainVoluntaryExit, keys[0])

@@ -83,18 +83,6 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.BeaconState
			},
			ExecutionOptimistic: isOptimistic,
		}, nil
	case version.Capella:
		protoState, err := migration.BeaconStateCapellaToProto(beaconSt)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not convert state to proto: %v", err)
		}
		return &ethpbv2.BeaconStateResponseV2{
			Version: ethpbv2.Version_CAPELLA,
			Data: &ethpbv2.BeaconStateContainer{
				State: &ethpbv2.BeaconStateContainer_CapellaState{CapellaState: protoState},
			},
			ExecutionOptimistic: isOptimistic,
		}, nil
	default:
		return nil, status.Error(codes.Internal, "Unsupported state version")
	}
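
The Capella arm in GetBeaconStateV2 follows the pattern of the existing fork cases: convert the native state to its generated proto form, wrap it in the fork-specific container, tag the response with the fork version, and carry the execution-optimistic flag through. The sketch below keeps only that dispatch shape; forkVersion, response, and buildStateResponse are placeholder names, and the per-fork proto conversion (the migration.BeaconState*ToProto calls) is reduced to a callback.

package main

import (
	"errors"
	"fmt"
)

// forkVersion is a placeholder for Prysm's runtime/version constants.
type forkVersion int

const (
	phase0 forkVersion = iota
	altair
	bellatrix
	capella
)

// response stands in for the generated ethpbv2.BeaconStateResponseV2.
type response struct {
	Version             forkVersion
	State               any // would be the fork-specific state container
	ExecutionOptimistic bool
}

// buildStateResponse keeps only the dispatch shape of the handler: every
// supported fork converts the state and tags the response with its version;
// anything else is an internal "unsupported state version" error.
func buildStateResponse(v forkVersion, convert func() (any, error), optimistic bool) (*response, error) {
	switch v {
	case phase0, altair, bellatrix, capella:
		st, err := convert() // per-fork proto conversion happens here
		if err != nil {
			return nil, fmt.Errorf("could not convert state to proto: %w", err)
		}
		return &response{Version: v, State: st, ExecutionOptimistic: optimistic}, nil
	default:
		return nil, errors.New("unsupported state version")
	}
}

func main() {
	resp, err := buildStateResponse(capella, func() (any, error) { return struct{}{}, nil }, true)
	fmt.Println(resp.Version, resp.ExecutionOptimistic, err)
}
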
@@ -122,8 +110,6 @@ func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.BeaconSt
		ver = ethpbv2.Version_ALTAIR
	case version.Bellatrix:
		ver = ethpbv2.Version_BELLATRIX
	case version.Capella:
		ver = ethpbv2.Version_CAPELLA
	default:
		return nil, status.Error(codes.Internal, "Unsupported state version")
	}
@@ -156,6 +142,6 @@ func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (
}

// GetForkChoice returns a dump fork choice store.
func (ds *Server) GetForkChoice(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceDump, error) {
func (ds *Server) GetForkChoice(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceResponse, error) {
	return ds.ForkFetcher.ForkChoicer().ForkChoiceDump(ctx)
}

@@ -76,23 +76,6 @@ func TestGetBeaconStateV2(t *testing.T) {
		assert.NotNil(t, resp)
		assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
	})
	t.Run("Capella", func(t *testing.T) {
		fakeState, _ := util.DeterministicGenesisStateCapella(t, 1)
		server := &Server{
			StateFetcher: &testutil.MockFetcher{
				BeaconState: fakeState,
			},
			HeadFetcher:           &blockchainmock.ChainService{},
			OptimisticModeFetcher: &blockchainmock.ChainService{},
			BeaconDB:              db,
		}
		resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.BeaconStateRequestV2{
			StateId: make([]byte, 0),
		})
		require.NoError(t, err)
		assert.NotNil(t, resp)
		assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
	})
	t.Run("execution optimistic", func(t *testing.T) {
		parentRoot := [32]byte{'a'}
		blk := util.NewBeaconBlock()
@@ -199,25 +182,6 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
		assert.DeepEqual(t, sszState, resp.Data)
		assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
	})
	t.Run("Capella", func(t *testing.T) {
		fakeState, _ := util.DeterministicGenesisStateCapella(t, 1)
		sszState, err := fakeState.MarshalSSZ()
		require.NoError(t, err)

		server := &Server{
			StateFetcher: &testutil.MockFetcher{
				BeaconState: fakeState,
			},
		}
		resp, err := server.GetBeaconStateSSZV2(context.Background(), &ethpbv2.BeaconStateRequestV2{
			StateId: make([]byte, 0),
		})
		require.NoError(t, err)
		assert.NotNil(t, resp)

		assert.DeepEqual(t, sszState, resp.Data)
		assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
	})
}

func TestListForkChoiceHeadsV2(t *testing.T) {

@@ -239,10 +239,7 @@ func (bs *Server) ListValidators(
		return nil, err
	}
	if s > reqState.Slot() {
		reqState, err = reqState.Copy()
		if err != nil {
			return nil, err
		}
		reqState = reqState.Copy()
		reqState, err = transition.ProcessSlots(ctx, reqState, s)
		if err != nil {
			return nil, status.Errorf(
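
The ListValidators hunk applies the same Copy change inside a handler: when the requested epoch's start slot is ahead of the stored state, the handler copies the state with a plain assignment and advances only the copy via transition.ProcessSlots before reading validators. A small sketch of that guard-and-advance pattern follows, with toy types standing in for Prysm's state and transition packages.

package main

import "fmt"

// reqStateT is a toy stand-in for the beacon state; the real handler works
// on state.BeaconState, but the guard has the same shape.
type reqStateT struct{ slot uint64 }

// Copy returns a value copy with no error, matching the updated hunk.
func (s *reqStateT) Copy() *reqStateT { c := *s; return &c }

// processSlots stands in for transition.ProcessSlots.
func processSlots(s *reqStateT, target uint64) (*reqStateT, error) {
	if target < s.slot {
		return nil, fmt.Errorf("cannot rewind state from slot %d to %d", s.slot, target)
	}
	s.slot = target
	return s, nil
}

// stateAtSlot mirrors the guard in the hunk: the stored state is copied and
// advanced only when the requested slot lies in its future.
func stateAtSlot(stored *reqStateT, s uint64) (*reqStateT, error) {
	if s > stored.slot {
		cp := stored.Copy()
		return processSlots(cp, s)
	}
	return stored, nil
}

func main() {
	stored := &reqStateT{slot: 64}
	adv, err := stateAtSlot(stored, 96)
	fmt.Println(adv.slot, stored.slot, err) // 96 64 <nil>: the stored state is untouched
}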