Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 22:07:59 -05:00)

Compare commits: capella-de...evil-shape (55 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | f098675d8c |  |
|  | 9111616225 |  |
|  | 25c02dbc95 |  |
|  | 6c9b7786a1 |  |
|  | 28ffa0554c |  |
|  | 7e7ac421f5 |  |
|  | 727284d51a |  |
|  | 8d236a6d0e |  |
|  | b26f8c90bf |  |
|  | d7099ebb38 |  |
|  | 2fee906d25 |  |
|  | 9f44d6e452 |  |
|  | 55f311eb73 |  |
|  | 0f90bacac9 |  |
|  | 30974039f3 |  |
|  | 3392ecb3e1 |  |
|  | fa2b64f702 |  |
|  | 6f5e35f08a |  |
|  | 79d6ce45ad |  |
|  | 73cd7df679 |  |
|  | d084d5a979 |  |
|  | db6b1c15c4 |  |
|  | 7c9bff489e |  |
|  | 1fca73d761 |  |
|  | fbafbdd62c |  |
|  | 75d98cf9af |  |
|  | 96401e734e |  |
|  | 5480d607ac |  |
|  | 38f0a81526 |  |
|  | ad680d3417 |  |
|  | 047cae5e8b |  |
|  | 179252faea |  |
|  | f93b82ee89 |  |
|  | 508e1ad005 |  |
|  | 1b2d917389 |  |
|  | eb6b811071 |  |
|  | cf71dbdf97 |  |
|  | 9328e9af1f |  |
|  | 4762fd71de |  |
|  | 769daffb73 |  |
|  | 16e6e0de6c |  |
|  | 396fc3dc7e |  |
|  | 7fa3ebfaa8 |  |
|  | 81b9eceb50 |  |
|  | 505ff6ea3d |  |
|  | 2b5125c7bc |  |
|  | 1e3a55c6a6 |  |
|  | 116f3ac265 |  |
|  | bbe003720c |  |
|  | e957edcb12 |  |
|  | 684022fa69 |  |
|  | a7a64632b1 |  |
|  | c89ab764e3 |  |
|  | 375a76d6c9 |  |
|  | bad7c6a94e |  |
@@ -1,4 +1,4 @@
-# Dependency Managagement in Prysm
+# Dependency Management in Prysm
 
 Prysm is go project with many complicated dependencies, including some c++ based libraries. There
 are two parts to Prysm's dependency management. Go modules and bazel managed dependencies. Be sure
@@ -28,7 +28,7 @@ including complicated c++ dependencies.
 One key advantage of Bazel over vanilla `go build` is that Bazel automatically (re)builds generated
 pb.go files at build time when file changes are present in any protobuf definition file or after
 any updates to the protobuf compiler or other relevant dependencies. Vanilla go users should run
-the following scripts often to ensure their generated files are up to date. Further more, Prysm
+the following scripts often to ensure their generated files are up to date. Furthermore, Prysm
 generates SSZ marshal related code based on defined data structures. These generated files must
 also be updated and checked in as frequently.
WORKSPACE (10 lines changed)
@@ -188,7 +188,7 @@ filegroup(
     url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
 )
 
-consensus_spec_version = "v1.3.0-alpha.2"
+consensus_spec_version = "v1.3.0-rc.1"
 
 bls_test_version = "v0.1.1"
 
@@ -204,7 +204,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "a92c41058dc17ced811cc85570cd6f8af761aedfcbd2dd7dd4fb64ac961d76f9",
+    sha256 = "3d6fadb64674eb64a84fae6c2efa9949231ea91e7cb74ada9214097323e86569",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )
 
@@ -220,7 +220,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "49a7944da92429ac8f41347f19837762247cdbf00e628c285d1b826e58e4096d",
+    sha256 = "54ffbcab1e77316a280e6f5a64c6ed62351e8f5678e6fa49340e49b9b5575e8e",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )
 
@@ -236,7 +236,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "d19673e9cd55e0c8d45eefc33b60978e14c166d0e891976fcaa114085312adcb",
+    sha256 = "bb06d30ca533dc97d45f2367916ba9ff1b5af52f08a9d8c33bb7b1a61254094e",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )
 
@@ -251,7 +251,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "a72b7457c403f6b76567d4d7bec19d01bedf7d5ef1d6f2c3a9e09ee86d401a14",
+    sha256 = "9d22246c00ec3907ef8dc3ddccdfe6f7153ce46df73deee0a0176fe7e4aa1126",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -93,7 +93,7 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconSta
 	if err != nil {
 		return nil, err
 	}
-	state, err = e.ProcessHistoricalRootsUpdate(state)
+	state, err = e.ProcessHistoricalDataUpdate(state)
 	if err != nil {
 		return nil, err
 	}
@@ -67,6 +67,10 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
 	epoch := time.CurrentEpoch(state)
 
 	numValidators := state.NumValidators()
+	hrs, err := state.HistoricalRoots()
+	if err != nil {
+		return nil, err
+	}
 	s := &ethpb.BeaconStateAltair{
 		GenesisTime:           state.GenesisTime(),
 		GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
@@ -79,7 +83,7 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
 		LatestBlockHeader: state.LatestBlockHeader(),
 		BlockRoots:        state.BlockRoots(),
 		StateRoots:        state.StateRoots(),
-		HistoricalRoots:   state.HistoricalRoots(),
+		HistoricalRoots:   hrs,
 		Eth1Data:          state.Eth1Data(),
 		Eth1DataVotes:     state.Eth1DataVotes(),
 		Eth1DepositIndex:  state.Eth1DepositIndex(),
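Across the Altair, Bellatrix and Capella upgrade hunks in this compare, the same mechanical change repeats: `state.HistoricalRoots()` now returns a value and an error, so each caller fetches it once into `hrs` and checks the error before building the new state proto. A minimal sketch of that calling convention, with a hypothetical `beaconState` type standing in for Prysm's real state interface:

```go
package main

import (
	"errors"
	"fmt"
)

// beaconState is a stand-in for Prysm's state.BeaconState: the getter is
// fallible, so callers must unpack (value, err) instead of reading a field.
type beaconState struct {
	historicalRoots [][]byte
}

func (s *beaconState) HistoricalRoots() ([][]byte, error) {
	if s.historicalRoots == nil {
		return nil, errors.New("historical roots unsupported for this state version")
	}
	return s.historicalRoots, nil
}

func upgrade(s *beaconState) (*beaconState, error) {
	hrs, err := s.HistoricalRoots() // the two-value form each Upgrade* function adopts
	if err != nil {
		return nil, err
	}
	return &beaconState{historicalRoots: hrs}, nil
}

func main() {
	st, err := upgrade(&beaconState{historicalRoots: [][]byte{}})
	fmt.Println(st != nil, err)
}
```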
@@ -82,7 +82,11 @@ func TestUpgradeToAltair(t *testing.T) {
 	require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), aState.LatestBlockHeader())
 	require.DeepSSZEqual(t, preForkState.BlockRoots(), aState.BlockRoots())
 	require.DeepSSZEqual(t, preForkState.StateRoots(), aState.StateRoots())
-	require.DeepSSZEqual(t, preForkState.HistoricalRoots(), aState.HistoricalRoots())
+	r1, err := preForkState.HistoricalRoots()
+	require.NoError(t, err)
+	r2, err := aState.HistoricalRoots()
+	require.NoError(t, err)
+	require.DeepSSZEqual(t, r1, r2)
 	require.DeepSSZEqual(t, preForkState.Eth1Data(), aState.Eth1Data())
 	require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), aState.Eth1DataVotes())
 	require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), aState.Eth1DepositIndex())
@@ -1,4 +1,3 @@
 package blocks
 
 var ProcessBLSToExecutionChange = processBLSToExecutionChange
-var BLSChangesSigningDomain = blsChangesSigningDomain
@@ -37,7 +37,7 @@ func NewGenesisBlock(stateRoot []byte) *ethpb.SignedBeaconBlock {
 	return block
 }
 
-var ErrUnrecognizedState = errors.New("uknonwn underlying type for state.BeaconState value")
+var ErrUnrecognizedState = errors.New("unknown underlying type for state.BeaconState value")
 
 func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfaces.SignedBeaconBlock, error) {
 	root, err := st.HashTreeRoot(ctx)
@@ -113,6 +113,38 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
 			},
 			Signature: params.BeaconConfig().EmptySignature[:],
 		})
+	case *ethpb.BeaconStateCapella:
+		return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockCapella{
+			Block: &ethpb.BeaconBlockCapella{
+				ParentRoot: params.BeaconConfig().ZeroHash[:],
+				StateRoot:  root[:],
+				Body: &ethpb.BeaconBlockBodyCapella{
+					RandaoReveal: make([]byte, 96),
+					Eth1Data: &ethpb.Eth1Data{
+						DepositRoot: make([]byte, 32),
+						BlockHash:   make([]byte, 32),
+					},
+					Graffiti: make([]byte, 32),
+					SyncAggregate: &ethpb.SyncAggregate{
+						SyncCommitteeBits:      make([]byte, fieldparams.SyncCommitteeLength/8),
+						SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
+					},
+					ExecutionPayload: &enginev1.ExecutionPayloadCapella{
+						ParentHash:    make([]byte, 32),
+						FeeRecipient:  make([]byte, 20),
+						StateRoot:     make([]byte, 32),
+						ReceiptsRoot:  make([]byte, 32),
+						LogsBloom:     make([]byte, 256),
+						PrevRandao:    make([]byte, 32),
+						BaseFeePerGas: make([]byte, 32),
+						BlockHash:     make([]byte, 32),
+						Transactions:  make([][]byte, 0),
+						Withdrawals:   make([]*enginev1.Withdrawal, 0),
+					},
+				},
+			},
+			Signature: params.BeaconConfig().EmptySignature[:],
+		})
 	default:
 		return nil, ErrUnrecognizedState
 	}
@@ -17,7 +17,6 @@ import (
 	ethpbv2 "github.com/prysmaticlabs/prysm/v3/proto/eth/v2"
 	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v3/runtime/version"
-	"github.com/prysmaticlabs/prysm/v3/time/slots"
 )
 
 const executionToBLSPadding = 12
@@ -162,25 +161,6 @@ func ProcessWithdrawals(st state.BeaconState, withdrawals []*enginev1.Withdrawal
 	return st, nil
 }
 
-// blsChangesSigningDomain returns the signing domain to check BLSToExecutionChange messages against.
-func blsChangesSigningDomain(st state.ReadOnlyBeaconState) ([]byte, error) {
-	var epoch types.Epoch
-	var fork *ethpb.Fork
-	if st.Version() < version.Capella {
-		epoch = params.BeaconConfig().CapellaForkEpoch
-		fork = &ethpb.Fork{
-			PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
-			CurrentVersion:  params.BeaconConfig().CapellaForkVersion,
-			Epoch:           epoch,
-		}
-
-	} else {
-		epoch = slots.ToEpoch(st.Slot())
-		fork = st.Fork()
-	}
-	return signing.Domain(fork, epoch, params.BeaconConfig().DomainBLSToExecutionChange, st.GenesisValidatorsRoot())
-}
-
 func BLSChangesSignatureBatch(
 	st state.ReadOnlyBeaconState,
 	changes []*ethpb.SignedBLSToExecutionChange,
@@ -195,9 +175,10 @@ func BLSChangesSignatureBatch(
 		Messages:     make([][32]byte, len(changes)),
 		Descriptions: make([]string, len(changes)),
 	}
-	domain, err := blsChangesSigningDomain(st)
+	c := params.BeaconConfig()
+	domain, err := signing.ComputeDomain(c.DomainBLSToExecutionChange, c.GenesisForkVersion, st.GenesisValidatorsRoot())
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "could not compute signing domain")
 	}
 	for i, change := range changes {
 		batch.Signatures[i] = change.Signature
@@ -223,7 +204,8 @@ func VerifyBLSChangeSignature(
 	st state.BeaconState,
 	change *ethpbv2.SignedBLSToExecutionChange,
 ) error {
-	domain, err := blsChangesSigningDomain(st)
+	c := params.BeaconConfig()
+	domain, err := signing.ComputeDomain(c.DomainBLSToExecutionChange, c.GenesisForkVersion, st.GenesisValidatorsRoot())
 	if err != nil {
 		return errors.Wrap(err, "could not compute signing domain")
 	}
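The hunks above drop the fork-dependent `blsChangesSigningDomain` helper and compute the domain from the genesis fork version instead. This matches the Capella consensus spec, where `BLSToExecutionChange` signatures are verified against `compute_domain(DOMAIN_BLS_TO_EXECUTION_CHANGE)` with the default `GENESIS_FORK_VERSION`, so a change signed once stays valid in any later fork. A self-contained sketch of that `compute_domain` calculation (the domain type 0x0A000000 and mainnet genesis fork version 0x00000000 come from the specs; the zero validators root is a placeholder, not a real network value):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// computeDomain mirrors the consensus-spec helper used by the new code path:
// domain = domain_type || fork_data_root[:28], where fork_data_root is
// hash_tree_root(ForkData(fork_version, genesis_validators_root)). ForkData
// has exactly two 32-byte leaves, so its root is one sha256 over both chunks.
func computeDomain(domainType, forkVersion [4]byte, genesisValidatorsRoot [32]byte) [32]byte {
	var versionChunk [32]byte
	copy(versionChunk[:], forkVersion[:]) // the 4-byte version is zero-padded to a full chunk

	h := sha256.New()
	h.Write(versionChunk[:])
	h.Write(genesisValidatorsRoot[:])
	var forkDataRoot [32]byte
	copy(forkDataRoot[:], h.Sum(nil))

	var domain [32]byte
	copy(domain[:4], domainType[:])
	copy(domain[4:], forkDataRoot[:28])
	return domain
}

func main() {
	domain := computeDomain([4]byte{0x0a, 0, 0, 0}, [4]byte{0, 0, 0, 0}, [32]byte{})
	fmt.Printf("%x\n", domain)
}
```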
@@ -791,6 +791,71 @@ func TestBLSChangesSignatureBatch(t *testing.T) {
 	require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
 }
 
+func TestBLSChangesSignatureBatchWrongFork(t *testing.T) {
+	spb := &ethpb.BeaconStateCapella{
+		Fork: &ethpb.Fork{
+			CurrentVersion:  params.BeaconConfig().CapellaForkVersion,
+			PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
+			Epoch:           params.BeaconConfig().CapellaForkEpoch,
+		},
+	}
+	numValidators := 10
+	validators := make([]*ethpb.Validator, numValidators)
+	blsChanges := make([]*ethpb.BLSToExecutionChange, numValidators)
+	spb.Balances = make([]uint64, numValidators)
+	privKeys := make([]common.SecretKey, numValidators)
+	maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
+	executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}
+
+	for i := range validators {
+		v := &ethpb.Validator{}
+		v.EffectiveBalance = maxEffectiveBalance
+		v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
+		v.WithdrawalCredentials = make([]byte, 32)
+		priv, err := bls.RandKey()
+		require.NoError(t, err)
+		privKeys[i] = priv
+		pubkey := priv.PublicKey().Marshal()
+
+		message := &ethpb.BLSToExecutionChange{
+			ToExecutionAddress: executionAddress,
+			ValidatorIndex:     types.ValidatorIndex(i),
+			FromBlsPubkey:      pubkey,
+		}
+
+		hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
+		digest := hashFn.Hash(pubkey)
+		digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
+		copy(v.WithdrawalCredentials, digest[:])
+		validators[i] = v
+		blsChanges[i] = message
+	}
+	spb.Validators = validators
+	st, err := state_native.InitializeFromProtoCapella(spb)
+	require.NoError(t, err)
+
+	signedChanges := make([]*ethpb.SignedBLSToExecutionChange, numValidators)
+	for i, message := range blsChanges {
+		signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
+		require.NoError(t, err)
+
+		signed := &ethpb.SignedBLSToExecutionChange{
+			Message:   message,
+			Signature: signature,
+		}
+		signedChanges[i] = signed
+	}
+	batch, err := blocks.BLSChangesSignatureBatch(st, signedChanges)
+	require.NoError(t, err)
+	verify, err := batch.Verify()
+	require.NoError(t, err)
+	require.Equal(t, false, verify)
+
+	// Verify a single change
+	change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
+	require.ErrorIs(t, signing.ErrSigFailedToVerify, blocks.VerifyBLSChangeSignature(st, change))
+}
+
 func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
 	cfg := params.BeaconConfig()
 	savedConfig := cfg.Copy()
@@ -847,7 +912,7 @@ func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
 	spc := &ethpb.BeaconStateCapella{
 		Fork: &ethpb.Fork{
 			CurrentVersion:  params.BeaconConfig().CapellaForkVersion,
-			PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
+			PreviousVersion: params.BeaconConfig().GenesisForkVersion,
 			Epoch:           params.BeaconConfig().CapellaForkEpoch,
 		},
 	}
@@ -859,7 +924,7 @@ func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
 	require.NoError(t, err)
 
 	for i, message := range blsChanges {
-		signature, err := signing.ComputeDomainAndSign(stc, time.CurrentEpoch(stc), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
+		signature, err := signing.ComputeDomainAndSign(stc, 0, message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
 		require.NoError(t, err)
 
 		signed := &ethpb.SignedBLSToExecutionChange{
@@ -879,51 +944,3 @@ func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
 	require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
 	params.OverrideBeaconConfig(savedConfig)
 }
-
-func TestBLSChangesSigningDomain(t *testing.T) {
-	cfg := params.BeaconConfig()
-	savedConfig := cfg.Copy()
-	cfg.CapellaForkEpoch = cfg.BellatrixForkEpoch.AddEpoch(2)
-	params.OverrideBeaconConfig(cfg)
-	capellaDomain := []byte{0xa, 0x0, 0x0, 0x0, 0xe7, 0xb4, 0xbb, 0x67, 0x55, 0x1d, 0xde, 0x95, 0x89, 0xc1, 0x55, 0x3d, 0xfd, 0xa3, 0x7a, 0x94, 0x2a, 0x18, 0xca, 0xf1, 0x84, 0xf9, 0xcc, 0x16, 0x29, 0xd2, 0x5c, 0xf5}
-
-	t.Run("pre-Capella fork", func(t *testing.T) {
-		spb := &ethpb.BeaconStateBellatrix{
-			Fork: &ethpb.Fork{
-				CurrentVersion:  params.BeaconConfig().BellatrixForkVersion,
-				PreviousVersion: params.BeaconConfig().AltairForkVersion,
-				Epoch:           params.BeaconConfig().BellatrixForkEpoch,
-			},
-		}
-		slot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
-		require.NoError(t, err)
-		spb.Slot = slot
-
-		st, err := state_native.InitializeFromProtoBellatrix(spb)
-		require.NoError(t, err)
-
-		domain, err := blocks.BLSChangesSigningDomain(st)
-		require.NoError(t, err)
-		require.DeepEqual(t, capellaDomain, domain)
-	})
-	t.Run("post-Capella fork", func(t *testing.T) {
-		spb := &ethpb.BeaconStateCapella{
-			Fork: &ethpb.Fork{
-				CurrentVersion:  params.BeaconConfig().CapellaForkVersion,
-				PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
-				Epoch:           params.BeaconConfig().CapellaForkEpoch,
-			},
-		}
-		slot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
-		require.NoError(t, err)
-		spb.Slot = slot
-
-		st, err := state_native.InitializeFromProtoCapella(spb)
-		require.NoError(t, err)
-
-		domain, err := blocks.BLSChangesSigningDomain(st)
-		require.NoError(t, err)
-		require.DeepEqual(t, capellaDomain, domain)
-	})
-	params.OverrideBeaconConfig(savedConfig)
-}
@@ -42,6 +42,10 @@ func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
 		return nil, err
 	}
 
+	hrs, err := state.HistoricalRoots()
+	if err != nil {
+		return nil, err
+	}
 	s := &ethpb.BeaconStateCapella{
 		GenesisTime:           state.GenesisTime(),
 		GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
@@ -54,7 +58,7 @@ func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
 		LatestBlockHeader: state.LatestBlockHeader(),
 		BlockRoots:        state.BlockRoots(),
 		StateRoots:        state.StateRoots(),
-		HistoricalRoots:   state.HistoricalRoots(),
+		HistoricalRoots:   hrs,
 		Eth1Data:          state.Eth1Data(),
 		Eth1DataVotes:     state.Eth1DataVotes(),
 		Eth1DepositIndex:  state.Eth1DepositIndex(),
@@ -90,6 +94,7 @@ func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
 		},
 		NextWithdrawalIndex:          0,
 		NextWithdrawalValidatorIndex: 0,
+		HistoricalSummaries:          make([]*ethpb.HistoricalSummary, 0),
 	}
 
 	return state_native.InitializeFromProtoUnsafeCapella(s)
@@ -25,7 +25,6 @@ func TestUpgradeToCapella(t *testing.T) {
 	require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
 	require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
 	require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
-	require.DeepSSZEqual(t, preForkState.HistoricalRoots(), mSt.HistoricalRoots())
 	require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
 	require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
 	require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
@@ -98,4 +97,8 @@ func TestUpgradeToCapella(t *testing.T) {
 	lwvi, err := mSt.NextWithdrawalValidatorIndex()
 	require.NoError(t, err)
 	require.Equal(t, types.ValidatorIndex(0), lwvi)
+
+	summaries, err := mSt.HistoricalSummaries()
+	require.NoError(t, err)
+	require.Equal(t, 0, len(summaries))
 }
@@ -13,11 +13,14 @@ go_library(
         "//beacon-chain/core/time:go_default_library",
         "//beacon-chain/core/validators:go_default_library",
         "//beacon-chain/state:go_default_library",
+        "//beacon-chain/state/stateutil:go_default_library",
+        "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//math:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//proto/prysm/v1alpha1/attestation:go_default_library",
+        "//runtime/version:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
     ],
 )
@@ -33,8 +36,10 @@ go_test(
     deps = [
         "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/core/time:go_default_library",
         "//beacon-chain/core/transition:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/state-native:go_default_library",
+        "//beacon-chain/state/stateutil:go_default_library",
+        "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types/primitives:go_default_library",
@@ -14,11 +14,14 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/validators"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
+	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stateutil"
+	fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v3/math"
 	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/attestation"
+	"github.com/prysmaticlabs/prysm/v3/runtime/version"
 )
 
 // sortableIndices implements the Sort interface to sort newly activated validator indices
@@ -349,33 +352,39 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
 	return state, nil
 }
 
-// ProcessHistoricalRootsUpdate processes the updates to historical root accumulator during epoch processing.
-//
-// Spec pseudocode definition:
-//
-//	def process_historical_roots_update(state: BeaconState) -> None:
-//	    # Set historical root accumulator
-//	    next_epoch = Epoch(get_current_epoch(state) + 1)
-//	    if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
-//	        historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
-//	        state.historical_roots.append(hash_tree_root(historical_batch))
-func ProcessHistoricalRootsUpdate(state state.BeaconState) (state.BeaconState, error) {
+// ProcessHistoricalDataUpdate processes the updates to historical data during epoch processing.
+// From Capella onward, per spec,state's historical summaries are updated instead of historical roots.
+func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, error) {
 	currentEpoch := time.CurrentEpoch(state)
 	nextEpoch := currentEpoch + 1
 
 	// Set historical root accumulator.
 	epochsPerHistoricalRoot := params.BeaconConfig().SlotsPerHistoricalRoot.DivSlot(params.BeaconConfig().SlotsPerEpoch)
 	if nextEpoch.Mod(uint64(epochsPerHistoricalRoot)) == 0 {
-		historicalBatch := &ethpb.HistoricalBatch{
-			BlockRoots: state.BlockRoots(),
-			StateRoots: state.StateRoots(),
-		}
-		batchRoot, err := historicalBatch.HashTreeRoot()
-		if err != nil {
-			return nil, errors.Wrap(err, "could not hash historical batch")
-		}
-		if err := state.AppendHistoricalRoots(batchRoot); err != nil {
-			return nil, err
+		if state.Version() >= version.Capella {
+			br, err := stateutil.ArraysRoot(state.BlockRoots(), fieldparams.BlockRootsLength)
+			if err != nil {
+				return nil, err
+			}
+			sr, err := stateutil.ArraysRoot(state.StateRoots(), fieldparams.StateRootsLength)
+			if err != nil {
+				return nil, err
+			}
+			if err := state.AppendHistoricalSummaries(&ethpb.HistoricalSummary{BlockSummaryRoot: br[:], StateSummaryRoot: sr[:]}); err != nil {
+				return nil, err
+			}
+		} else {
+			historicalBatch := &ethpb.HistoricalBatch{
+				BlockRoots: state.BlockRoots(),
+				StateRoots: state.StateRoots(),
+			}
+			batchRoot, err := historicalBatch.HashTreeRoot()
+			if err != nil {
+				return nil, errors.Wrap(err, "could not hash historical batch")
+			}
+			if err := state.AppendHistoricalRoots(batchRoot); err != nil {
+				return nil, err
+			}
 		}
 	}
 
@@ -426,7 +435,7 @@ func ProcessFinalUpdates(state state.BeaconState) (state.BeaconState, error) {
 	}
 
 	// Set historical root accumulator.
-	state, err = ProcessHistoricalRootsUpdate(state)
+	state, err = ProcessHistoricalDataUpdate(state)
 	if err != nil {
 		return nil, err
 	}
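The renamed `ProcessHistoricalDataUpdate` keeps the old accumulator's cadence and only changes what gets appended after Capella (a `HistoricalSummary` of the two root arrays instead of the hash tree root of a `HistoricalBatch`). With mainnet constants the cadence works out as below; a quick arithmetic check:

```go
package main

import "fmt"

// Mainnet consensus constants: SLOTS_PER_HISTORICAL_ROOT = 8192,
// SLOTS_PER_EPOCH = 32, SECONDS_PER_SLOT = 12.
func main() {
	const slotsPerHistoricalRoot = 8192
	const slotsPerEpoch = 32
	const secondsPerSlot = 12

	epochsPerAccumulation := slotsPerHistoricalRoot / slotsPerEpoch // 256 epochs
	hours := float64(slotsPerHistoricalRoot*secondsPerSlot) / 3600  // ~27.3 hours

	// A batch root (pre-Capella) or summary (post-Capella) is appended
	// whenever next_epoch % 256 == 0.
	fmt.Printf("append every %d epochs (~%.1f hours)\n", epochsPerAccumulation, hours)
}
```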
@@ -10,8 +10,10 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/epoch"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
 	state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
+	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stateutil"
+	fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
@@ -274,7 +276,9 @@ func TestProcessFinalUpdates_CanProcess(t *testing.T) {
 	assert.DeepNotEqual(t, params.BeaconConfig().ZeroHash[:], mix, "latest RANDAO still zero hashes")
 
 	// Verify historical root accumulator was appended.
-	assert.Equal(t, 1, len(newS.HistoricalRoots()), "Unexpected slashed balance")
+	roots, err := newS.HistoricalRoots()
+	require.NoError(t, err)
+	assert.Equal(t, 1, len(roots), "Unexpected slashed balance")
 	currAtt, err := newS.CurrentEpochAttestations()
 	require.NoError(t, err)
 	assert.NotNil(t, currAtt, "Nil value stored in current epoch attestations instead of empty slice")
@@ -455,3 +459,83 @@ func TestProcessSlashings_BadValue(t *testing.T) {
 	_, err = epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplier)
 	require.ErrorContains(t, "addition overflows", err)
 }
+
+func TestProcessHistoricalDataUpdate(t *testing.T) {
+	tests := []struct {
+		name     string
+		st       func() state.BeaconState
+		verifier func(state.BeaconState)
+	}{
+		{
+			name: "no change",
+			st: func() state.BeaconState {
+				st, _ := util.DeterministicGenesisState(t, 1)
+				return st
+			},
+			verifier: func(st state.BeaconState) {
+				roots, err := st.HistoricalRoots()
+				require.NoError(t, err)
+				require.Equal(t, 0, len(roots))
+			},
+		},
+		{
+			name: "before capella can process and get historical root",
+			st: func() state.BeaconState {
+				st, _ := util.DeterministicGenesisState(t, 1)
+				st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerHistoricalRoot-1)
+				require.NoError(t, err)
+				return st
+			},
+			verifier: func(st state.BeaconState) {
+				roots, err := st.HistoricalRoots()
+				require.NoError(t, err)
+				require.Equal(t, 1, len(roots))
+
+				b := &ethpb.HistoricalBatch{
+					BlockRoots: st.BlockRoots(),
+					StateRoots: st.StateRoots(),
+				}
+				r, err := b.HashTreeRoot()
+				require.NoError(t, err)
+				require.DeepEqual(t, r[:], roots[0])
+
+				_, err = st.HistoricalSummaries()
+				require.ErrorContains(t, "HistoricalSummaries is not supported for phase0", err)
+			},
+		},
+		{
+			name: "after capella can process and get historical summary",
+			st: func() state.BeaconState {
+				st, _ := util.DeterministicGenesisStateCapella(t, 1)
+				st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerHistoricalRoot-1)
+				require.NoError(t, err)
+				return st
+			},
+			verifier: func(st state.BeaconState) {
+				summaries, err := st.HistoricalSummaries()
+				require.NoError(t, err)
+				require.Equal(t, 1, len(summaries))
+
+				br, err := stateutil.ArraysRoot(st.BlockRoots(), fieldparams.BlockRootsLength)
+				require.NoError(t, err)
+				sr, err := stateutil.ArraysRoot(st.StateRoots(), fieldparams.StateRootsLength)
+				require.NoError(t, err)
+				b := &ethpb.HistoricalSummary{
+					BlockSummaryRoot: br[:],
+					StateSummaryRoot: sr[:],
+				}
+				require.DeepEqual(t, b, summaries[0])
+				hrs, err := st.HistoricalRoots()
+				require.NoError(t, err)
+				require.DeepEqual(t, hrs, [][]byte{})
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := epoch.ProcessHistoricalDataUpdate(tt.st())
+			require.NoError(t, err)
+			tt.verifier(got)
+		})
+	}
+}
@@ -35,6 +35,10 @@ func UpgradeToBellatrix(state state.BeaconState) (state.BeaconState, error) {
 		return nil, err
 	}
 
+	hrs, err := state.HistoricalRoots()
+	if err != nil {
+		return nil, err
+	}
 	s := &ethpb.BeaconStateBellatrix{
 		GenesisTime:           state.GenesisTime(),
 		GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
@@ -47,7 +51,7 @@ func UpgradeToBellatrix(state state.BeaconState) (state.BeaconState, error) {
 		LatestBlockHeader: state.LatestBlockHeader(),
 		BlockRoots:        state.BlockRoots(),
 		StateRoots:        state.StateRoots(),
-		HistoricalRoots:   state.HistoricalRoots(),
+		HistoricalRoots:   hrs,
 		Eth1Data:          state.Eth1Data(),
 		Eth1DataVotes:     state.Eth1DataVotes(),
 		Eth1DepositIndex:  state.Eth1DepositIndex(),
@@ -24,7 +24,11 @@ func TestUpgradeToBellatrix(t *testing.T) {
 	require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
 	require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
 	require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
-	require.DeepSSZEqual(t, preForkState.HistoricalRoots(), mSt.HistoricalRoots())
+	r1, err := preForkState.HistoricalRoots()
+	require.NoError(t, err)
+	r2, err := mSt.HistoricalRoots()
+	require.NoError(t, err)
+	require.DeepSSZEqual(t, r1, r2)
 	require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
 	require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
 	require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
@@ -19,6 +19,9 @@ const (
 
 	// SyncCommitteeContributionReceived is sent after a sync committee contribution object has been received.
 	SyncCommitteeContributionReceived
+
+	// BLSToExecutionChangeReceived is sent after a BLS to execution change object has been received from gossip or rpc.
+	BLSToExecutionChangeReceived
 )
 
 // UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -44,3 +47,8 @@ type SyncCommitteeContributionReceivedData struct {
 	// Contribution is the sync committee contribution object.
 	Contribution *ethpb.SignedContributionAndProof
 }
+
+// BLSToExecutionChangeReceivedData is the data sent with BLSToExecutionChangeReceived events.
+type BLSToExecutionChangeReceivedData struct {
+	Change *ethpb.SignedBLSToExecutionChange
+}
@@ -2,12 +2,14 @@ package kv
 
 import (
 	"context"
+	"encoding/hex"
 	"os"
 	"testing"
 
 	"github.com/bazelbuild/rules_go/go/tools/bazel"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/iface"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
+	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
 	"github.com/prysmaticlabs/prysm/v3/testing/assert"
 	"github.com/prysmaticlabs/prysm/v3/testing/require"
 	"github.com/prysmaticlabs/prysm/v3/testing/util"
@@ -48,6 +50,37 @@ func testGenesisDataSaved(t *testing.T, db iface.Database) {
 	require.Equal(t, gbHTR, headHTR, "head block does not match genesis block")
 }
 
+func TestLoadCapellaFromFile(t *testing.T) {
+	cfg, err := params.ByName(params.MainnetName)
+	require.NoError(t, err)
+	// This state fixture is from a hive testnet, `0a` is the suffix they are using in their fork versions.
+	suffix, err := hex.DecodeString("0a")
+	require.NoError(t, err)
+	require.Equal(t, 1, len(suffix))
+	reversioned := cfg.Copy()
+	params.FillTestVersions(reversioned, suffix[0])
+	reversioned.CapellaForkEpoch = 0
+	require.Equal(t, [4]byte{3, 0, 0, 10}, bytesutil.ToBytes4(reversioned.CapellaForkVersion))
+	reversioned.ConfigName = "capella-genesis-test"
+	undo, err := params.SetActiveWithUndo(reversioned)
+	require.NoError(t, err)
+	defer func() {
+		require.NoError(t, undo())
+	}()
+
+	fp := "testdata/capella_genesis.ssz"
+	rfp, err := bazel.Runfile(fp)
+	if err == nil {
+		fp = rfp
+	}
+	sb, err := os.ReadFile(fp)
+	require.NoError(t, err)
+
+	db := setupDB(t)
+	require.NoError(t, db.LoadGenesis(context.Background(), sb))
+	testGenesisDataSaved(t, db)
+}
+
 func TestLoadGenesisFromFile(t *testing.T) {
 	// for this test to work, we need the active config to have these properties:
 	// - fork version schedule that matches mainnnet.genesis.ssz
@@ -57,7 +90,7 @@ func TestLoadGenesisFromFile(t *testing.T) {
 	// uses the mainnet fork schedule. construct the differently named mainnet config and set it active.
 	// finally, revert all this at the end of the test.
 
-	// first get the real mainnet out of the way by overwriting it schedule.
+	// first get the real mainnet out of the way by overwriting its schedule.
 	cfg, err := params.ByName(params.MainnetName)
 	require.NoError(t, err)
 	cfg = cfg.Copy()
beacon-chain/db/kv/testdata/capella_genesis.ssz (BIN, vendored, new file; binary not shown)
@@ -86,6 +86,8 @@ type EngineCaller interface {
 	GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
 }
 
+var EmptyBlockHash = errors.New("Block hash is empty 0x0000...")
+
 // NewPayload calls the engine_newPayloadVX method via JSON-RPC.
 func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error) {
 	ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
@@ -471,6 +473,10 @@ func (s *Service) ReconstructFullBlock(
 	if executionBlock == nil {
 		return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionBlockHash)
 	}
+	if bytes.Equal(executionBlock.Hash.Bytes(), []byte{}) {
+		return nil, EmptyBlockHash
+	}
+
 	executionBlock.Version = blindedBlock.Version()
 	payload, err := fullPayloadFromExecutionBlock(header, executionBlock)
 	if err != nil {
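A note on the new guard: `bytes.Equal(x, []byte{})` from the Go standard library compares lengths first, so it is true only when `x` is zero-length (nil also qualifies), not when `x` is a 32-byte hash of all zeros. A tiny self-contained check of that stdlib behavior:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var empty []byte
	zero32 := make([]byte, 32)
	fmt.Println(bytes.Equal(empty, []byte{}))  // true: nil and empty compare equal
	fmt.Println(bytes.Equal(zero32, []byte{})) // false: 32 zero bytes is not zero-length
}
```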
@@ -54,6 +54,7 @@ func (RPCClientBad) CallContext(context.Context, interface{}, string, ...interfa
 }
 
 func TestClient_IPC(t *testing.T) {
+	t.Skip("Skipping IPC test to support Capella devnet-3")
 	server := newTestIPCServer(t)
 	defer server.Stop()
 	rpcClient := rpc.DialInProc(server)
@@ -154,6 +155,8 @@ func TestClient_IPC(t *testing.T) {
 }
 
 func TestClient_HTTP(t *testing.T) {
+	t.Skip("Skipping HTTP test to support Capella devnet-3")
+
 	ctx := context.Background()
 	fix := fixtures()
@@ -26,6 +26,7 @@ go_library(
         "//encoding/bytesutil:go_default_library",
         "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
+        "//time/slots:go_default_library",
         "@com_github_ethereum_go_ethereum//accounts/abi/bind/backends:go_default_library",
         "@com_github_ethereum_go_ethereum//common:go_default_library",
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
@@ -15,6 +15,7 @@ import (
 	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
 	pb "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
+	"github.com/prysmaticlabs/prysm/v3/time/slots"
 )
 
 // EngineClient --
@@ -23,6 +24,7 @@ type EngineClient struct {
 	PayloadIDBytes          *pb.PayloadIDBytes
 	ForkChoiceUpdatedResp   []byte
 	ExecutionPayload        *pb.ExecutionPayload
+	ExecutionPayloadCapella *pb.ExecutionPayloadCapella
 	ExecutionBlock          *pb.ExecutionBlock
 	Err                     error
 	ErrLatestExecBlock      error
@@ -54,7 +56,10 @@ func (e *EngineClient) ForkchoiceUpdated(
 }
 
 // GetPayload --
-func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, _ types.Slot) (interfaces.ExecutionData, error) {
+func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s types.Slot) (interfaces.ExecutionData, error) {
+	if slots.ToEpoch(s) >= params.BeaconConfig().CapellaForkEpoch {
+		return blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella)
+	}
 	p, err := blocks.WrappedExecutionPayload(e.ExecutionPayload)
 	if err != nil {
 		return nil, err
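The mock now picks the payload type by epoch rather than ignoring the slot. An epoch is just slot / SLOTS_PER_EPOCH (32 on mainnet), so the cutover to Capella payloads lands at the first slot of the fork epoch. A quick sketch of the gate with a hypothetical fork epoch of 100 (not a real network's value):

```go
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet SLOTS_PER_EPOCH

func toEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

func main() {
	capellaForkEpoch := uint64(100) // hypothetical, for illustration only
	for _, slot := range []uint64{3199, 3200} {
		fmt.Printf("slot %d -> epoch %d, capella payload: %v\n",
			slot, toEpoch(slot), toEpoch(slot) >= capellaForkEpoch)
	}
}
```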
@@ -120,16 +120,16 @@ func TestVotes_CanFindHead(t *testing.T) {
 	//            |
 	//            3
 	//            |
-	//            4 <- head
+	//            4
 	//           /
-	//          5 <- justified epoch = 2
+	//          5 <- head, justified epoch = 2
 	state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 2)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
 
 	r, err = f.Head(context.Background(), balances)
 	require.NoError(t, err)
-	assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 1")
+	assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 2")
 
 	// Insert block 6 with justified epoch 3: verify it's head
 	// 0
@@ -138,15 +138,15 @@ func TestVotes_CanFindHead(t *testing.T) {
 	//            |
 	//            3
 	//            |
-	//            4 <- head
+	//            4
 	//           / \
-	//          5   6 <- justified epoch = 3
+	//          5   6 <- head, justified epoch = 3
 	state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 3, 2)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
 	r, err = f.Head(context.Background(), balances)
 	require.NoError(t, err)
-	assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
+	assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 3")
 
 	// Moved 2 votes to block 5:
 	f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
@@ -174,10 +174,10 @@ func TestVotes_CanFindHead(t *testing.T) {
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
 	r, err = f.Head(context.Background(), balances)
 	require.NoError(t, err)
-	assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
+	assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 3")
 
-	// Insert block 9 with justified epoch 3, it becomes head
-	// Verify 9 is the head:
+	// Insert block 10 with justified epoch 3, it becomes head
+	// Verify 10 is the head:
 	// 0
 	// / \
 	// 2  1
@@ -289,7 +289,7 @@ func TestVotes_CanFindHead(t *testing.T) {
 
 	r, err = f.Head(context.Background(), balances)
 	require.NoError(t, err)
-	assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
+	assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
 
 	// Insert new block 11 and verify head is at 11.
 	// 5   6
@@ -448,6 +448,11 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
 		return nil
 	}
 	baseDir := cliCtx.String(cmd.DataDirFlag.Name)
+
+	if cliCtx.IsSet(flags.SlasherDirFlag.Name) {
+		baseDir = cliCtx.String(flags.SlasherDirFlag.Name)
+	}
+
 	dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
 	clearDB := cliCtx.Bool(cmd.ClearDB.Name)
 	forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
@@ -52,8 +52,8 @@ func (c *AttCaches) SaveUnaggregatedAttestations(atts []*ethpb.Attestation) erro
 
 // UnaggregatedAttestations returns all the unaggregated attestations in cache.
 func (c *AttCaches) UnaggregatedAttestations() ([]*ethpb.Attestation, error) {
-	c.unAggregateAttLock.Lock()
-	defer c.unAggregateAttLock.Unlock()
+	c.unAggregateAttLock.RLock()
+	defer c.unAggregateAttLock.RUnlock()
 	unAggregatedAtts := c.unAggregatedAtt
 	atts := make([]*ethpb.Attestation, 0, len(unAggregatedAtts))
 	for _, att := range unAggregatedAtts {
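The hunk above downgrades an exclusive lock to a reader lock on a read-only path. With Go's `sync.RWMutex`, any number of `RLock` holders can proceed concurrently, while `Lock` is exclusive, so readers no longer serialize behind each other. A minimal self-contained illustration of the pattern (toy types, not Prysm code):

```go
package main

import (
	"fmt"
	"sync"
)

// counterCache mimics the pattern above: reads take the shared lock,
// writes take the exclusive lock.
type counterCache struct {
	mu    sync.RWMutex
	items map[string]int
}

func (c *counterCache) Get(k string) int {
	c.mu.RLock() // many readers may hold this at once
	defer c.mu.RUnlock()
	return c.items[k]
}

func (c *counterCache) Set(k string, v int) {
	c.mu.Lock() // exclusive: blocks readers and other writers
	defer c.mu.Unlock()
	c.items[k] = v
}

func main() {
	c := &counterCache{items: map[string]int{}}
	c.Set("atts", 3)
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); _ = c.Get("atts") }() // concurrent readers
	}
	wg.Wait()
	fmt.Println(c.Get("atts"))
}
```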
@@ -59,8 +59,8 @@ func (s *Service) pruneExpiredAtts() {
 		if err := s.cfg.Pool.DeleteBlockAttestation(att); err != nil {
 			log.WithError(err).Error("Could not delete expired block attestation")
 		}
-			expiredBlockAtts.Inc()
-		}
+		expiredBlockAtts.Inc()
+	}
 }
@@ -16,7 +16,6 @@ go_library(
         "//config/params:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//container/doubly-linked-list:go_default_library",
-        "//crypto/bls/blst:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
@@ -10,7 +10,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
 	doublylinkedlist "github.com/prysmaticlabs/prysm/v3/container/doubly-linked-list"
-	"github.com/prysmaticlabs/prysm/v3/crypto/bls/blst"
 	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
 	"github.com/sirupsen/logrus"
 )

@@ -67,26 +66,21 @@ func (p *Pool) BLSToExecChangesForInclusion(st state.BeaconState) ([]*ethpb.Sign
 	p.lock.RLock()
 	length := int(math.Min(float64(params.BeaconConfig().MaxBlsToExecutionChanges), float64(p.pending.Len())))
 	result := make([]*ethpb.SignedBLSToExecutionChange, 0, length)
-	node := p.pending.First()
+	node := p.pending.Last()
 	for node != nil && len(result) < length {
 		change, err := node.Value()
 		if err != nil {
 			p.lock.RUnlock()
 			return nil, err
 		}
 		_, err = blocks.ValidateBLSToExecutionChange(st, change)
 		if err != nil {
 			logrus.WithError(err).Warning("removing invalid BLSToExecutionChange from pool")
 			// MarkIncluded removes the invalid change from the pool
 			p.lock.RUnlock()
 			if err := p.MarkIncluded(change); err != nil {
 				return nil, errors.Wrap(err, "could not mark BLSToExecutionChange as included")
 			}
 			p.lock.RLock()
 		} else {
-			result = append(result, change)
-			p.lock.RUnlock()
-			if err := p.MarkIncluded(change); err != nil {
-				return nil, errors.Wrap(err, "could not mark included")
-			}
-			p.lock.RLock()
+			copied := ethpb.CopyBLSToExecutionChanges([]*ethpb.SignedBLSToExecutionChange{change})
+			result = append(result, copied[0])
 		}
-		node, err = node.Next()
+		node, err = node.Prev()
 		if err != nil {
 			p.lock.RUnlock()
 			return nil, err

@@ -97,35 +91,11 @@
 		return result, nil
 	}
 	// We now verify the signatures in batches
-	cSet, err := blocks.BLSChangesSignatureBatch(st, result)
+	_, err := blocks.BLSChangesSignatureBatch(st, result)
 	if err != nil {
 		logrus.WithError(err).Warning("could not get BLSToExecutionChanges signatures")
-	} else {
-		ok, err := cSet.Verify()
-		if err != nil {
-			logrus.WithError(err).Warning("could not batch verify BLSToExecutionChanges signatures")
-		} else if ok {
-			return result, nil
-		}
 	}
-	// Batch signature failed, check signatures individually
-	verified := make([]*ethpb.SignedBLSToExecutionChange, 0, length)
-	for i, sig := range cSet.Signatures {
-		signature, err := blst.SignatureFromBytes(sig)
-		if err != nil {
-			logrus.WithError(err).Warning("could not get signature from bytes")
-			continue
-		}
-		if !signature.Verify(cSet.PublicKeys[i], cSet.Messages[i][:]) {
-			logrus.Warning("removing BLSToExecutionChange with invalid signature from pool")
-			if err := p.MarkIncluded(result[i]); err != nil {
-				return nil, errors.Wrap(err, "could not mark BLSToExecutionChange as included")
-			}
-		} else {
-			verified = append(verified, result[i])
-		}
-	}
-	return verified, nil
+	return result, nil
 }
 
 // InsertBLSToExecChange inserts an object into the pool.
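Two behavioral shifts in the pool hunks are worth calling out: the walk now starts from `Last()` and steps with `Prev()` instead of `First()`/`Next()`, and valid changes are copied before being returned rather than being marked included at read time, so they stay in the pool until a block actually includes them. A toy illustration of that copy-on-read pattern over a plain slice (not Prysm's doubly-linked list):

```go
package main

import "fmt"

// change is a toy stand-in for a pooled SignedBLSToExecutionChange.
type change struct{ validatorIndex uint64 }

type pool struct{ pending []*change }

// forInclusion walks newest-first (mirroring the Last()/Prev() change in the
// diff) and returns copies, so callers cannot mutate pooled objects and
// nothing is removed from the pool at read time.
func (p *pool) forInclusion(max int) []*change {
	out := make([]*change, 0, max)
	for i := len(p.pending) - 1; i >= 0 && len(out) < max; i-- {
		c := *p.pending[i] // copy; the pool keeps its own object
		out = append(out, &c)
	}
	return out
}

func main() {
	p := &pool{pending: []*change{{1}, {2}, {3}}}
	got := p.forInclusion(2)
	fmt.Println(got[0].validatorIndex) // 3: the newest entry comes out first
	got[0].validatorIndex = 99         // mutate the returned copy...
	fmt.Println(p.pending[2].validatorIndex) // ...the pooled object is still 3
}
```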
@@ -130,7 +130,7 @@ func TestBLSToExecChangesForInclusion(t *testing.T) {
 		// We want FIFO semantics, which means validator with index 16 shouldn't be returned
 		assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges), len(changes))
 		for _, ch := range changes {
-			assert.NotEqual(t, types.ValidatorIndex(16), ch.Message.ValidatorIndex)
+			assert.NotEqual(t, types.ValidatorIndex(15), ch.Message.ValidatorIndex)
 		}
 	})
 	t.Run("One Bad change", func(t *testing.T) {
@@ -143,19 +143,19 @@ func TestBLSToExecChangesForInclusion(t *testing.T) {
 		changes, err := pool.BLSToExecChangesForInclusion(st)
 		require.NoError(t, err)
 		assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges), len(changes))
-		assert.Equal(t, types.ValidatorIndex(2), changes[1].Message.ValidatorIndex)
+		assert.Equal(t, types.ValidatorIndex(30), changes[1].Message.ValidatorIndex)
 		signedChanges[1].Message.FromBlsPubkey[5] = saveByte
 	})
 	t.Run("One Bad Signature", func(t *testing.T) {
 		pool := NewPool()
-		copy(signedChanges[1].Signature, signedChanges[2].Signature)
+		copy(signedChanges[30].Signature, signedChanges[31].Signature)
 		for i := uint64(0); i < numValidators; i++ {
 			pool.InsertBLSToExecChange(signedChanges[i])
 		}
 		changes, err := pool.BLSToExecChangesForInclusion(st)
 		require.NoError(t, err)
 		assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges)-1, len(changes))
-		assert.Equal(t, types.ValidatorIndex(2), changes[1].Message.ValidatorIndex)
+		assert.Equal(t, types.ValidatorIndex(29), changes[1].Message.ValidatorIndex)
 	})
 }
@@ -416,6 +416,8 @@ func receiveEvents(eventChan <-chan *sse.Event, w http.ResponseWriter, req *http
 		data = &EventChainReorgJson{}
 	case events.SyncCommitteeContributionTopic:
 		data = &SignedContributionAndProofJson{}
+	case events.BLSToExecutionChangeTopic:
+		data = &SignedBLSToExecutionChangeJson{}
 	case "error":
 		data = &EventErrorJson{}
 	default:
@@ -864,6 +864,7 @@ type BeaconStateCapellaJson struct {
 	LatestExecutionPayloadHeader *ExecutionPayloadHeaderCapellaJson `json:"latest_execution_payload_header"`
 	NextWithdrawalIndex          string                             `json:"next_withdrawal_index"`
 	NextWithdrawalValidatorIndex string                             `json:"next_withdrawal_validator_index"`
+	HistoricalSummaries          []*HistoricalSummaryJson           `json:"historical_summaries"`
 }
 
 type BeaconStateContainerV2Json struct {
@@ -1037,6 +1038,11 @@ type ForkChoiceDumpJson struct {
 	ForkChoiceNodes []*ForkChoiceNodeJson `json:"fork_choice_nodes"`
 }
 
+type HistoricalSummaryJson struct {
+	BlockSummaryRoot string `json:"block_summary_root" hex:"true"`
+	StateSummaryRoot string `json:"state_summary_root" hex:"true"`
+}
+
 //----------------
 // SSZ
 // ---------------
@@ -54,7 +54,6 @@ go_library(
         "//proto/eth/v2:go_default_library",
         "//proto/migration:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
-        "//runtime/version:go_default_library",
         "//time/slots:go_default_library",
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
@@ -115,6 +114,7 @@ go_test(
        "//crypto/hash:go_default_library",
         "//encoding/bytesutil:go_default_library",
         "//encoding/ssz:go_default_library",
+        "//network/forks:go_default_library",
         "//proto/engine/v1:go_default_library",
         "//proto/eth/service:go_default_library",
         "//proto/eth/v1:go_default_library",
@@ -142,32 +142,25 @@ func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBli
 	ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlock")
 	defer span.End()
 
-	capellaBlkContainer, ok := req.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock)
-	if ok {
-		if err := bs.submitBlindedCapellaBlock(ctx, capellaBlkContainer.CapellaBlock, req.Signature); err != nil {
+	switch blkContainer := req.Message.(type) {
+	case *ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock:
+		if err := bs.submitBlindedCapellaBlock(ctx, blkContainer.CapellaBlock, req.Signature); err != nil {
 			return nil, err
 		}
-	}
-
-	bellatrixBlkContainer, ok := req.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock)
-	if ok {
-		if err := bs.submitBlindedBellatrixBlock(ctx, bellatrixBlkContainer.BellatrixBlock, req.Signature); err != nil {
+	case *ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock:
+		if err := bs.submitBlindedBellatrixBlock(ctx, blkContainer.BellatrixBlock, req.Signature); err != nil {
 			return nil, err
 		}
-	}
-
-	// At the end we check forks that don't have blinded blocks.
-	phase0BlkContainer, ok := req.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block)
-	if ok {
-		if err := bs.submitPhase0Block(ctx, phase0BlkContainer.Phase0Block, req.Signature); err != nil {
+	case *ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block:
+		if err := bs.submitPhase0Block(ctx, blkContainer.Phase0Block, req.Signature); err != nil {
 			return nil, err
 		}
-	}
-	altairBlkContainer, ok := req.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock)
-	if ok {
-		if err := bs.submitAltairBlock(ctx, altairBlkContainer.AltairBlock, req.Signature); err != nil {
+	case *ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock:
+		if err := bs.submitAltairBlock(ctx, blkContainer.AltairBlock, req.Signature); err != nil {
 			return nil, err
 		}
+	default:
+		return nil, status.Errorf(codes.InvalidArgument, "Unsupported block container type %T", blkContainer)
 	}
 
 	return &emptypb.Empty{}, nil
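This refactor (and the matching one in SubmitBlock below) replaces a ladder of one-shot type assertions with a single type switch, which evaluates the message once and adds an explicit default for unsupported payloads. A generic illustration of the pattern with toy types, not the Prysm API:

```go
package main

import "fmt"

type phase0Block struct{}
type capellaBlock struct{}

// submit dispatches on the concrete type in a single pass. Unlike chained
// `x, ok := msg.(T)` assertions, a type switch makes the unsupported case
// impossible to fall through silently.
func submit(msg interface{}) error {
	switch blk := msg.(type) {
	case *phase0Block:
		fmt.Println("submitting phase0 block", blk)
		return nil
	case *capellaBlock:
		fmt.Println("submitting capella block", blk)
		return nil
	default:
		return fmt.Errorf("unsupported block container type %T", blk)
	}
}

func main() {
	fmt.Println(submit(&capellaBlock{})) // <nil>
	fmt.Println(submit(42))              // unsupported block container type int
}
```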
@@ -220,29 +220,25 @@ func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBloc
 	ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlock")
 	defer span.End()
 
-	phase0BlkContainer, ok := req.Message.(*ethpbv2.SignedBeaconBlockContainer_Phase0Block)
-	if ok {
-		if err := bs.submitPhase0Block(ctx, phase0BlkContainer.Phase0Block, req.Signature); err != nil {
+	switch blkContainer := req.Message.(type) {
+	case *ethpbv2.SignedBeaconBlockContainer_Phase0Block:
+		if err := bs.submitPhase0Block(ctx, blkContainer.Phase0Block, req.Signature); err != nil {
 			return nil, err
 		}
-	}
-	altairBlkContainer, ok := req.Message.(*ethpbv2.SignedBeaconBlockContainer_AltairBlock)
-	if ok {
-		if err := bs.submitAltairBlock(ctx, altairBlkContainer.AltairBlock, req.Signature); err != nil {
+	case *ethpbv2.SignedBeaconBlockContainer_AltairBlock:
+		if err := bs.submitAltairBlock(ctx, blkContainer.AltairBlock, req.Signature); err != nil {
 			return nil, err
 		}
-	}
-	bellatrixBlkContainer, ok := req.Message.(*ethpbv2.SignedBeaconBlockContainer_BellatrixBlock)
-	if ok {
-		if err := bs.submitBellatrixBlock(ctx, bellatrixBlkContainer.BellatrixBlock, req.Signature); err != nil {
+	case *ethpbv2.SignedBeaconBlockContainer_BellatrixBlock:
+		if err := bs.submitBellatrixBlock(ctx, blkContainer.BellatrixBlock, req.Signature); err != nil {
 			return nil, err
 		}
-	}
-	capellaBlkContainer, ok := req.Message.(*ethpbv2.SignedBeaconBlockContainer_CapellaBlock)
-	if ok {
-		if err := bs.submitCapellaBlock(ctx, capellaBlkContainer.CapellaBlock, req.Signature); err != nil {
+	case *ethpbv2.SignedBeaconBlockContainer_CapellaBlock:
+		if err := bs.submitCapellaBlock(ctx, blkContainer.CapellaBlock, req.Signature); err != nil {
 			return nil, err
 		}
+	default:
+		return nil, status.Errorf(codes.InvalidArgument, "Unsupported block container type %T", blkContainer)
 	}
 
 	return &emptypb.Empty{}, nil
@@ -6,6 +6,7 @@ import (
"reflect"
"testing"

"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
dbTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
@@ -43,13 +44,6 @@ func fillDBTestBlocks(ctx context.Context, t *testing.T, beaconDB db.Database) (
b := util.NewBeaconBlock()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
att1 := util.NewAttestation()
att1.Data.Slot = i
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
att2 := util.NewAttestation()
att2.Data.Slot = i
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
blks[i], err = blocks.NewSignedBeaconBlock(b)
@@ -86,13 +80,12 @@ func fillDBTestBlocksAltair(ctx context.Context, t *testing.T, beaconDB db.Datab
b := util.NewBeaconBlockAltair()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
att1 := util.NewAttestation()
att1.Data.Slot = i
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
att2 := util.NewAttestation()
att2.Data.Slot = i
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
syncCommitteeBits := bitfield.NewBitvector512()
syncCommitteeBits.SetBitAt(100, true)
b.Block.Body.SyncAggregate = &ethpbalpha.SyncAggregate{
SyncCommitteeBits: syncCommitteeBits,
SyncCommitteeSignature: bytesutil.PadTo([]byte("signature"), 96),
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
signedB, err := blocks.NewSignedBeaconBlock(b)
@@ -128,13 +121,28 @@ func fillDBTestBlocksBellatrix(ctx context.Context, t *testing.T, beaconDB db.Da
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
att1 := util.NewAttestation()
att1.Data.Slot = i
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
att2 := util.NewAttestation()
att2.Data.Slot = i
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
syncCommitteeBits := bitfield.NewBitvector512()
syncCommitteeBits.SetBitAt(100, true)
b.Block.Body.SyncAggregate = &ethpbalpha.SyncAggregate{
SyncCommitteeBits: syncCommitteeBits,
SyncCommitteeSignature: bytesutil.PadTo([]byte("signature"), 96),
}
b.Block.Body.ExecutionPayload = &enginev1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte("parent_hash"), 32),
FeeRecipient: bytesutil.PadTo([]byte("fee_recipient"), 20),
StateRoot: bytesutil.PadTo([]byte("state_root"), 32),
ReceiptsRoot: bytesutil.PadTo([]byte("receipts_root"), 32),
LogsBloom: bytesutil.PadTo([]byte("logs_bloom"), 256),
PrevRandao: bytesutil.PadTo([]byte("prev_randao"), 32),
BlockNumber: 123,
GasLimit: 123,
GasUsed: 123,
Timestamp: 123,
ExtraData: bytesutil.PadTo([]byte("extra_data"), 32),
BaseFeePerGas: bytesutil.PadTo([]byte("base_fee_per_gas"), 32),
BlockHash: bytesutil.PadTo([]byte("block_hash"), 32),
Transactions: [][]byte{[]byte("transaction1"), []byte("transaction2")},
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
signedB, err := blocks.NewSignedBeaconBlock(b)
@@ -170,13 +178,42 @@ func fillDBTestBlocksCapella(ctx context.Context, t *testing.T, beaconDB db.Data
b := util.NewBeaconBlockCapella()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
att1 := util.NewAttestation()
att1.Data.Slot = i
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
att2 := util.NewAttestation()
att2.Data.Slot = i
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
syncCommitteeBits := bitfield.NewBitvector512()
syncCommitteeBits.SetBitAt(100, true)
b.Block.Body.SyncAggregate = &ethpbalpha.SyncAggregate{
SyncCommitteeBits: syncCommitteeBits,
SyncCommitteeSignature: bytesutil.PadTo([]byte("signature"), 96),
}
b.Block.Body.ExecutionPayload = &enginev1.ExecutionPayloadCapella{
ParentHash: bytesutil.PadTo([]byte("parent_hash"), 32),
FeeRecipient: bytesutil.PadTo([]byte("fee_recipient"), 20),
StateRoot: bytesutil.PadTo([]byte("state_root"), 32),
ReceiptsRoot: bytesutil.PadTo([]byte("receipts_root"), 32),
LogsBloom: bytesutil.PadTo([]byte("logs_bloom"), 256),
PrevRandao: bytesutil.PadTo([]byte("prev_randao"), 32),
BlockNumber: 123,
GasLimit: 123,
GasUsed: 123,
Timestamp: 123,
ExtraData: bytesutil.PadTo([]byte("extra_data"), 32),
BaseFeePerGas: bytesutil.PadTo([]byte("base_fee_per_gas"), 32),
BlockHash: bytesutil.PadTo([]byte("block_hash"), 32),
Transactions: [][]byte{[]byte("transaction1"), []byte("transaction2")},
Withdrawals: []*enginev1.Withdrawal{
{
Index: 1,
ValidatorIndex: 1,
Address: bytesutil.PadTo([]byte("address1"), 20),
Amount: 1,
},
{
Index: 2,
ValidatorIndex: 2,
Address: bytesutil.PadTo([]byte("address2"), 20),
Amount: 2,
},
},
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
signedB, err := blocks.NewSignedBeaconBlock(b)
@@ -212,13 +249,28 @@ func fillDBTestBlocksBellatrixBlinded(ctx context.Context, t *testing.T, beaconD
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
att1 := util.NewAttestation()
att1.Data.Slot = i
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
att2 := util.NewAttestation()
att2.Data.Slot = i
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
syncCommitteeBits := bitfield.NewBitvector512()
syncCommitteeBits.SetBitAt(100, true)
b.Block.Body.SyncAggregate = &ethpbalpha.SyncAggregate{
SyncCommitteeBits: syncCommitteeBits,
SyncCommitteeSignature: bytesutil.PadTo([]byte("signature"), 96),
}
b.Block.Body.ExecutionPayloadHeader = &enginev1.ExecutionPayloadHeader{
ParentHash: bytesutil.PadTo([]byte("parent_hash"), 32),
FeeRecipient: bytesutil.PadTo([]byte("fee_recipient"), 20),
StateRoot: bytesutil.PadTo([]byte("state_root"), 32),
ReceiptsRoot: bytesutil.PadTo([]byte("receipts_root"), 32),
LogsBloom: bytesutil.PadTo([]byte("logs_bloom"), 256),
PrevRandao: bytesutil.PadTo([]byte("prev_randao"), 32),
BlockNumber: 123,
GasLimit: 123,
GasUsed: 123,
Timestamp: 123,
ExtraData: bytesutil.PadTo([]byte("extra_data"), 32),
BaseFeePerGas: bytesutil.PadTo([]byte("base_fee_per_gas"), 32),
BlockHash: bytesutil.PadTo([]byte("block_hash"), 32),
TransactionsRoot: bytesutil.PadTo([]byte("transactions_root"), 32),
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
signedB, err := blocks.NewSignedBeaconBlock(b)
@@ -254,13 +306,29 @@ func fillDBTestBlocksCapellaBlinded(ctx context.Context, t *testing.T, beaconDB
b := util.NewBlindedBeaconBlockCapella()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
att1 := util.NewAttestation()
att1.Data.Slot = i
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
att2 := util.NewAttestation()
att2.Data.Slot = i
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
syncCommitteeBits := bitfield.NewBitvector512()
syncCommitteeBits.SetBitAt(100, true)
b.Block.Body.SyncAggregate = &ethpbalpha.SyncAggregate{
SyncCommitteeBits: syncCommitteeBits,
SyncCommitteeSignature: bytesutil.PadTo([]byte("signature"), 96),
}
b.Block.Body.ExecutionPayloadHeader = &enginev1.ExecutionPayloadHeaderCapella{
ParentHash: bytesutil.PadTo([]byte("parent_hash"), 32),
FeeRecipient: bytesutil.PadTo([]byte("fee_recipient"), 20),
StateRoot: bytesutil.PadTo([]byte("state_root"), 32),
ReceiptsRoot: bytesutil.PadTo([]byte("receipts_root"), 32),
LogsBloom: bytesutil.PadTo([]byte("logs_bloom"), 256),
PrevRandao: bytesutil.PadTo([]byte("prev_randao"), 32),
BlockNumber: 123,
GasLimit: 123,
GasUsed: 123,
Timestamp: 123,
ExtraData: bytesutil.PadTo([]byte("extra_data"), 32),
BaseFeePerGas: bytesutil.PadTo([]byte("base_fee_per_gas"), 32),
BlockHash: bytesutil.PadTo([]byte("block_hash"), 32),
TransactionsRoot: bytesutil.PadTo([]byte("transactions_root"), 32),
WithdrawalsRoot: bytesutil.PadTo([]byte("withdrawals_root"), 32),
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
signedB, err := blocks.NewSignedBeaconBlock(b)
@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/network/forks"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"google.golang.org/protobuf/types/known/emptypb"
@@ -49,8 +50,6 @@ func TestGetSpec(t *testing.T) {
config.AltairForkEpoch = 100
config.BellatrixForkVersion = []byte("BellatrixForkVersion")
config.BellatrixForkEpoch = 101
config.ShardingForkVersion = []byte("ShardingForkVersion")
config.ShardingForkEpoch = 102
config.CapellaForkVersion = []byte("CapellaForkVersion")
config.CapellaForkEpoch = 103
config.BLSWithdrawalPrefixByte = byte('b')
@@ -138,7 +137,7 @@ func TestGetSpec(t *testing.T) {
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
require.NoError(t, err)

assert.Equal(t, 105, len(resp.Data))
assert.Equal(t, 103, len(resp.Data))
for k, v := range resp.Data {
switch k {
case "CONFIG_NAME":
@@ -203,10 +202,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "0x"+hex.EncodeToString([]byte("BellatrixForkVersion")), v)
case "BELLATRIX_FORK_EPOCH":
assert.Equal(t, "101", v)
case "SHARDING_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("ShardingForkVersion")), v)
case "SHARDING_FORK_EPOCH":
assert.Equal(t, "102", v)
case "CAPELLA_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("CapellaForkVersion")), v)
case "CAPELLA_FORK_EPOCH":
@@ -425,6 +420,6 @@ func TestForkSchedule_CorrectNumberOfForks(t *testing.T) {
s := &Server{}
resp, err := s.GetForkSchedule(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
// Genesis and Altair.
assert.Equal(t, 3, len(resp.Data))
os := forks.NewOrderedSchedule(params.BeaconConfig())
assert.Equal(t, os.Len(), len(resp.Data))
}
@@ -16,7 +16,6 @@ import (
ethpbv2 "github.com/prysmaticlabs/prysm/v3/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v3/proto/migration"
ethpbalpha "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
@@ -312,38 +311,16 @@ func (bs *Server) SubmitVoluntaryExit(ctx context.Context, req *ethpbv1.SignedVo
func (bs *Server) SubmitSignedBLSToExecutionChanges(ctx context.Context, req *ethpbv2.SubmitBLSToExecutionChangesRequest) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitSignedBLSToExecutionChanges")
defer span.End()
st, err := bs.ChainInfoFetcher.HeadState(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
var failures []*helpers.SingleIndexedVerificationFailure
for i, change := range req.GetChanges() {
for _, change := range req.GetChanges() {
alphaChange := migration.V2SignedBLSToExecutionChangeToV1Alpha1(change)
_, err = blocks.ValidateBLSToExecutionChange(st, alphaChange)
if err != nil {
failures = append(failures, &helpers.SingleIndexedVerificationFailure{
Index: i,
Message: "Could not validate SignedBLSToExecutionChange: " + err.Error(),
})
continue
}
if err := blocks.VerifyBLSChangeSignature(st, change); err != nil {
failures = append(failures, &helpers.SingleIndexedVerificationFailure{
Index: i,
Message: "Could not validate signature: " + err.Error(),
})
continue
}
bs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.BLSToExecutionChangeReceived,
Data: &operation.BLSToExecutionChangeReceivedData{
Change: alphaChange,
},
})
bs.BLSChangesPool.InsertBLSToExecChange(alphaChange)
if st.Version() >= version.Capella {
if err := bs.Broadcaster.Broadcast(ctx, alphaChange); err != nil {
failures = append(failures, &helpers.SingleIndexedVerificationFailure{
Index: i,
Message: "Could not broadcast BLSToExecutionChange: " + err.Error(),
})
continue
}
}
}
if len(failures) > 0 {
failuresContainer := &helpers.IndexedVerificationFailure{Failures: failures}
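The handler above validates each change independently and accumulates per-index failures instead of aborting on the first bad entry. A self-contained sketch of that collect-and-continue pattern (the failure struct and validate function below are illustrative, not the Prysm types):

package main

import "fmt"

type indexedFailure struct {
	Index   int
	Message string
}

func validate(v int) error {
	if v < 0 {
		return fmt.Errorf("negative value %d", v)
	}
	return nil
}

func main() {
	changes := []int{1, -2, 3}
	var failures []indexedFailure
	for i, c := range changes {
		// Record the failure with its request index and keep going,
		// so one bad change does not reject the whole batch.
		if err := validate(c); err != nil {
			failures = append(failures, indexedFailure{Index: i, Message: err.Error()})
			continue
		}
		fmt.Println("accepted change", i)
	}
	if len(failures) > 0 {
		fmt.Printf("%d change(s) failed: %+v\n", len(failures), failures)
	}
}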
@@ -1205,8 +1205,8 @@ func TestSubmitSignedBLSToExecutionChanges_Ok(t *testing.T) {

spb := &ethpbv1alpha1.BeaconStateCapella{
Fork: &ethpbv1alpha1.Fork{
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
Epoch: params.BeaconConfig().CapellaForkEpoch,
},
}
@@ -1342,8 +1342,8 @@ func TestSubmitSignedBLSToExecutionChanges_Bellatrix(t *testing.T) {

spc := &ethpbv1alpha1.BeaconStateCapella{
Fork: &ethpbv1alpha1.Fork{
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
Epoch: params.BeaconConfig().CapellaForkEpoch,
},
}
@@ -1405,8 +1405,8 @@ func TestSubmitSignedBLSToExecutionChanges_Failures(t *testing.T) {

spb := &ethpbv1alpha1.BeaconStateCapella{
Fork: &ethpbv1alpha1.Fork{
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
Epoch: params.BeaconConfig().CapellaForkEpoch,
},
}
@@ -33,6 +33,8 @@ const (
ChainReorgTopic = "chain_reorg"
// SyncCommitteeContributionTopic represents a new sync committee contribution event topic.
SyncCommitteeContributionTopic = "contribution_and_proof"
// BLSToExecutionChangeTopic represents a new received BLS to execution change event topic.
BLSToExecutionChangeTopic = "bls_to_execution_change"
)

var casesHandled = map[string]bool{
@@ -43,6 +45,7 @@ var casesHandled = map[string]bool{
FinalizedCheckpointTopic: true,
ChainReorgTopic: true,
SyncCommitteeContributionTopic: true,
BLSToExecutionChangeTopic: true,
}

// StreamEvents allows requesting all events from a set of topics defined in the Ethereum consensus API standard.
@@ -178,6 +181,16 @@ func handleBlockOperationEvents(
}
v2Data := migration.V1Alpha1SignedContributionAndProofToV2(contributionData.Contribution)
return streamData(stream, SyncCommitteeContributionTopic, v2Data)
case operation.BLSToExecutionChangeReceived:
if _, ok := requestedTopics[BLSToExecutionChangeTopic]; !ok {
return nil
}
changeData, ok := event.Data.(*operation.BLSToExecutionChangeReceivedData)
if !ok {
return nil
}
v2Change := migration.V1Alpha1SignedBLSToExecChangeToV2(changeData.Change)
return streamData(stream, BLSToExecutionChangeTopic, v2Change)
default:
return nil
}
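The events handler gates each received operation on the set of topics the client asked for before writing it to the stream. A minimal sketch of that topic filter (the event and send types here are invented for illustration):

package main

import "fmt"

type event struct {
	topic string
	data  string
}

// streamFiltered forwards only events whose topic was requested,
// mirroring the requestedTopics check in handleBlockOperationEvents.
func streamFiltered(requested map[string]bool, in []event, send func(event)) {
	for _, ev := range in {
		if !requested[ev.topic] {
			continue // topic not subscribed; drop silently
		}
		send(ev)
	}
}

func main() {
	requested := map[string]bool{"bls_to_execution_change": true}
	events := []event{
		{topic: "chain_reorg", data: "ignored"},
		{topic: "bls_to_execution_change", data: "forwarded"},
	}
	streamFiltered(requested, events, func(ev event) {
		fmt.Println("sent:", ev.topic, ev.data)
	})
}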
@@ -238,6 +238,43 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
feed: srv.OperationNotifier.OperationFeed(),
})
})
t.Run(BLSToExecutionChangeTopic, func(t *testing.T) {
ctx := context.Background()
srv, ctrl, mockStream := setupServer(ctx, t)
defer ctrl.Finish()

wantedChangeV1alpha1 := &eth.SignedBLSToExecutionChange{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: 1,
FromBlsPubkey: []byte("from"),
ToExecutionAddress: []byte("to"),
},
Signature: make([]byte, 96),
}
wantedChange := migration.V1Alpha1SignedBLSToExecChangeToV2(wantedChangeV1alpha1)
genericResponse, err := anypb.New(wantedChange)
require.NoError(t, err)

wantedMessage := &gateway.EventSource{
Event: BLSToExecutionChangeTopic,
Data: genericResponse,
}

assertFeedSendAndReceive(ctx, &assertFeedArgs{
t: t,
srv: srv,
topics: []string{BLSToExecutionChangeTopic},
stream: mockStream,
shouldReceive: wantedMessage,
itemToSend: &feed.Event{
Type: operation.BLSToExecutionChangeReceived,
Data: &operation.BLSToExecutionChangeReceivedData{
Change: wantedChangeV1alpha1,
},
},
feed: srv.OperationNotifier.OperationFeed(),
})
})
}

func TestStreamEvents_StateEvents(t *testing.T) {
@@ -500,16 +500,9 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
if optimistic {
return nil, status.Errorf(codes.Unavailable, "The node is currently optimistic and cannot serve validators")
}
altairBlk, err := vs.V1Alpha1Server.BuildAltairBeaconBlock(ctx, v1alpha1req)
b, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
ok, b, err := vs.V1Alpha1Server.GetAndBuildBlindBlock(ctx, altairBlk)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare blind beacon block: %v", err)
}
if !ok {
return nil, status.Error(codes.Unavailable, "Builder is not available due to miss-config or circuit breaker")
return nil, status.Error(codes.Unavailable, "Could not get block from prysm API")
}
blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(b.GetBlindedBellatrix())
if err != nil {
@@ -689,18 +689,20 @@ func TestProduceBlockV2(t *testing.T) {
mockChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:]}
mockExecutionChain := &mockExecution.Chain{}
v1Alpha1Server := &v1alpha1validator.Server{
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ChainStartFetcher: mockExecutionChain,
Eth1InfoFetcher: mockExecutionChain,
Eth1BlockFetcher: mockExecutionChain,
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db, doublylinkedtree.New()),
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ChainStartFetcher: mockExecutionChain,
Eth1InfoFetcher: mockExecutionChain,
Eth1BlockFetcher: mockExecutionChain,
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db, doublylinkedtree.New()),
TimeFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}

proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
@@ -797,19 +799,21 @@ func TestProduceBlockV2(t *testing.T) {
mochChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:]}
mockExecutionChain := &mockExecution.Chain{}
v1Alpha1Server := &v1alpha1validator.Server{
HeadFetcher: mochChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mochChainService,
HeadUpdater: mochChainService,
ChainStartFetcher: mockExecutionChain,
Eth1InfoFetcher: mockExecutionChain,
Eth1BlockFetcher: mockExecutionChain,
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db, doublylinkedtree.New()),
SyncCommitteePool: synccommittee.NewStore(),
HeadFetcher: mochChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mochChainService,
HeadUpdater: mochChainService,
ChainStartFetcher: mockExecutionChain,
Eth1InfoFetcher: mockExecutionChain,
Eth1BlockFetcher: mockExecutionChain,
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db, doublylinkedtree.New()),
SyncCommitteePool: synccommittee.NewStore(),
TimeFetcher: mochChainService,
OptimisticModeFetcher: mochChainService,
}

proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
@@ -2105,6 +2109,14 @@ func TestProduceBlindedBlock(t *testing.T) {
require.NoError(t, err)
graffiti := bytesutil.ToBytes32([]byte("eth2"))

copied := beaconState.Copy()
require.NoError(t, copied.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
idx, err := helpers.BeaconProposerIndex(ctx, copied)
require.NoError(t, err)
require.NoError(t,
db.SaveRegistrationsByValidatorIDs(ctx, []types.ValidatorIndex{idx},
[]*ethpbalpha.ValidatorRegistrationV1{{FeeRecipient: make([]byte, 20), Pubkey: make([]byte, 48)}}))

req := &ethpbv1.ProduceBlockRequest{
Slot: params.BeaconConfig().SlotsPerEpoch + 1,
RandaoReveal: randaoReveal,
@@ -14,12 +14,12 @@ go_library(
"proposer_attestations.go",
"proposer_bellatrix.go",
"proposer_builder.go",
"proposer_capella.go",
"proposer_deposits.go",
"proposer_empty_block.go",
"proposer_eth1data.go",
"proposer_execution_payload.go",
"proposer_exits.go",
"proposer_phase0.go",
"proposer_slashings.go",
"proposer_sync_aggregate.go",
"server.go",
@@ -44,12 +44,12 @@ go_library(
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
@@ -125,6 +125,7 @@ common_deps = [
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
@@ -174,6 +175,7 @@ go_test(
"attester_test.go",
"blocks_test.go",
"exit_test.go",
"proposer_altair_test.go",
"proposer_attestations_test.go",
"proposer_bellatrix_test.go",
"proposer_builder_test.go",
@@ -89,6 +89,17 @@ func sendVerifiedBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltairServe
return nil
}
b.Block = &ethpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: phBlk}
case version.Capella:
pb, err := data.SignedBlock.Proto()
if err != nil {
return errors.Wrap(err, "could not get protobuf block")
}
phBlk, ok := pb.(*ethpb.SignedBeaconBlockCapella)
if !ok {
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockCapella")
return nil
}
b.Block = &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: phBlk}
}

if err := stream.Send(b); err != nil {
@@ -136,6 +147,8 @@ func (vs *Server) sendBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltair
b.Block = &ethpb.StreamBlocksResponse_AltairBlock{AltairBlock: p}
case *ethpb.SignedBeaconBlockBellatrix:
b.Block = &ethpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: p}
case *ethpb.SignedBeaconBlockCapella:
b.Block = &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: p}
default:
log.Errorf("Unknown block type %T", p)
}
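Both stream paths above pick the response oneof arm from the concrete protobuf type of the incoming block. A standalone sketch of that wrap-by-type step (the block and response types below are placeholders for the generated protobuf messages):

package main

import "fmt"

// Placeholder block types standing in for the generated protobufs.
type altairBlock struct{}
type capellaBlock struct{}

// response mimics a oneof wrapper: exactly one field is set.
type response struct {
	altair  *altairBlock
	capella *capellaBlock
}

func wrap(blk interface{}) (response, error) {
	var r response
	switch p := blk.(type) {
	case *altairBlock:
		r.altair = p
	case *capellaBlock:
		r.capella = p
	default:
		return r, fmt.Errorf("unknown block type %T", p)
	}
	return r, nil
}

func main() {
	r, err := wrap(&capellaBlock{})
	fmt.Printf("%+v %v\n", r, err)
}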
@@ -112,6 +112,48 @@ func TestServer_StreamAltairBlocks_OnHeadUpdated(t *testing.T) {
<-exitRoutine
}

func TestServer_StreamCapellaBlocks_OnHeadUpdated(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.BeaconConfig())
ctx := context.Background()
beaconState, privs := util.DeterministicGenesisStateCapella(t, 64)
c, err := altair.NextSyncCommittee(ctx, beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(c))

b, err := util.GenerateFullBlockCapella(beaconState, privs, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
chainService := &chainMock.ChainService{State: beaconState}
server := &Server{
Ctx: ctx,
BlockNotifier: chainService.BlockNotifier(),
HeadFetcher: chainService,
}
exitRoutine := make(chan bool)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockStream := mock.NewMockBeaconNodeValidatorAltair_StreamBlocksServer(ctrl)

mockStream.EXPECT().Send(&ethpb.StreamBlocksResponse{Block: &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: b}}).Do(func(arg0 interface{}) {
exitRoutine <- true
})
mockStream.EXPECT().Context().Return(ctx).AnyTimes()

go func(tt *testing.T) {
assert.NoError(tt, server.StreamBlocksAltair(&ethpb.StreamBlocksRequest{}, mockStream), "Could not call RPC method")
}(t)
wrappedBlk, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = server.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: wrappedBlk},
})
}
<-exitRoutine
}

func TestServer_StreamAltairBlocksVerified_OnHeadUpdated(t *testing.T) {
db := dbTest.SetupDB(t)
ctx := context.Background()
@@ -154,3 +196,46 @@ func TestServer_StreamAltairBlocksVerified_OnHeadUpdated(t *testing.T) {
}
<-exitRoutine
}

func TestServer_StreamCapellaBlocksVerified_OnHeadUpdated(t *testing.T) {
db := dbTest.SetupDB(t)
ctx := context.Background()
beaconState, privs := util.DeterministicGenesisStateCapella(t, 32)
c, err := altair.NextSyncCommittee(ctx, beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(c))

b, err := util.GenerateFullBlockCapella(beaconState, privs, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wrappedBlk := util.SaveBlock(t, ctx, db, b)
chainService := &chainMock.ChainService{State: beaconState}
server := &Server{
Ctx: ctx,
StateNotifier: chainService.StateNotifier(),
HeadFetcher: chainService,
}
exitRoutine := make(chan bool)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockStream := mock.NewMockBeaconNodeValidatorAltair_StreamBlocksServer(ctrl)
mockStream.EXPECT().Send(&ethpb.StreamBlocksResponse{Block: &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: b}}).Do(func(arg0 interface{}) {
exitRoutine <- true
})
mockStream.EXPECT().Context().Return(ctx).AnyTimes()

go func(tt *testing.T) {
assert.NoError(tt, server.StreamBlocksAltair(&ethpb.StreamBlocksRequest{
VerifiedOnly: true,
}, mockStream), "Could not call RPC method")
}(t)
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = server.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{Slot: b.Block.Slot, BlockRoot: r, SignedBlock: wrappedBlk},
})
}
<-exitRoutine
}
@@ -14,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/v3/config/params"
@@ -41,26 +42,110 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
ctx, span := trace.StartSpan(ctx, "ProposerServer.GetBeaconBlock")
defer span.End()
span.AddAttributes(trace.Int64Attribute("slot", int64(req.Slot)))
if slots.ToEpoch(req.Slot) < params.BeaconConfig().AltairForkEpoch {
blk, err := vs.getPhase0BeaconBlock(ctx, req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not fetch phase0 beacon block: %v", err)
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Phase0{Phase0: blk}}, nil
} else if slots.ToEpoch(req.Slot) < params.BeaconConfig().BellatrixForkEpoch {
blk, err := vs.getAltairBeaconBlock(ctx, req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not fetch Altair beacon block: %v", err)
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Altair{Altair: blk}}, nil

// A syncing validator should not produce a block.
if vs.SyncChecker.Syncing() {
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
}

// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
if err := vs.optimisticStatus(ctx); err != nil {
return nil, err
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
if err := vs.optimisticStatus(ctx); err != nil {
return nil, status.Errorf(codes.Unavailable, "Validator is not ready to propose: %v", err)
}
}

return vs.getBellatrixBeaconBlock(ctx, req)
sBlk, err := getEmptyBlock(req.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare block: %v", err)
}
parentRoot, err := vs.HeadFetcher.HeadRoot(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head root: %v", err)
}
head, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
head, err = transition.ProcessSlotsUsingNextSlotCache(ctx, head, parentRoot, req.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", req.Slot, err)
}

blk := sBlk.Block()
// Set slot, graffiti, randao reveal, and parent root.
blk.SetSlot(req.Slot)
blk.Body().SetGraffiti(req.Graffiti)
blk.Body().SetRandaoReveal(req.RandaoReveal)
blk.SetParentRoot(parentRoot)

// Set eth1 data.
eth1Data, err := vs.eth1DataMajorityVote(ctx, head)
if err != nil {
eth1Data = &ethpb.Eth1Data{DepositRoot: params.BeaconConfig().ZeroHash[:], BlockHash: params.BeaconConfig().ZeroHash[:]}
log.WithError(err).Error("Could not get eth1data")
}
blk.Body().SetEth1Data(eth1Data)

// Set deposit and attestation.
deposits, atts, err := vs.packDepositsAndAttestations(ctx, head, eth1Data) // TODO: split attestations and deposits
if err != nil {
blk.Body().SetDeposits([]*ethpb.Deposit{})
blk.Body().SetAttestations([]*ethpb.Attestation{})
log.WithError(err).Error("Could not pack deposits and attestations")
}
blk.Body().SetDeposits(deposits)
blk.Body().SetAttestations(atts)

// Set proposer index.
idx, err := helpers.BeaconProposerIndex(ctx, head)
if err != nil {
return nil, fmt.Errorf("could not calculate proposer index %v", err)
}
blk.SetProposerIndex(idx)

// Set slashings.
validProposerSlashings, validAttSlashings := vs.getSlashings(ctx, head)
blk.Body().SetProposerSlashings(validProposerSlashings)
blk.Body().SetAttesterSlashings(validAttSlashings)

// Set exits.
blk.Body().SetVoluntaryExits(vs.getExits(head, req.Slot))

// Set sync aggregate. New in Altair.
vs.setSyncAggregate(ctx, blk)

// Set execution data. New in Bellatrix.
if err := vs.setExecutionData(ctx, blk, head); err != nil {
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
}

// Set bls to execution change. New in Capella.
vs.setBlsToExecData(blk, head)

sr, err := vs.computeStateRoot(ctx, sBlk)
if err != nil {
sr = []byte{'e', 'v', 'i', 'l'}
}
blk.SetStateRoot(sr)

pb, err := blk.Proto()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert block to proto: %v", err)
}
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().CapellaForkEpoch {
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Capella{Capella: pb.(*ethpb.BeaconBlockCapella)}}, nil
}
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch && !blk.IsBlinded() {
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Bellatrix{Bellatrix: pb.(*ethpb.BeaconBlockBellatrix)}}, nil
}
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch && blk.IsBlinded() {
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: pb.(*ethpb.BlindedBeaconBlockBellatrix)}}, nil
}
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().AltairForkEpoch {
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Altair{Altair: pb.(*ethpb.BeaconBlockAltair)}}, nil
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Phase0{Phase0: pb.(*ethpb.BeaconBlock)}}, nil
}

// ProposeBeaconBlock is called by a proposer during its assigned slot to create a block in an attempt
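The rewritten GetBeaconBlock builds one generic block and only at the end selects the response wrapper by comparing the request's epoch against the configured fork epochs, testing the newest fork first. A small sketch of that ordering (the fork epochs and names below are illustrative values, not mainnet configuration):

package main

import "fmt"

const (
	altairEpoch    = 100
	bellatrixEpoch = 101
	capellaEpoch   = 103
	slotsPerEpoch  = 32
)

// versionFor mirrors the cascade at the end of GetBeaconBlock:
// check the newest fork first so each slot maps to exactly one version.
func versionFor(slot uint64) string {
	epoch := slot / slotsPerEpoch
	switch {
	case epoch >= capellaEpoch:
		return "capella"
	case epoch >= bellatrixEpoch:
		return "bellatrix" // blinded vs. full is decided separately via blk.IsBlinded()
	case epoch >= altairEpoch:
		return "altair"
	default:
		return "phase0"
	}
}

func main() {
	for _, slot := range []uint64{0, altairEpoch * slotsPerEpoch, capellaEpoch * slotsPerEpoch} {
		fmt.Println(slot, "->", versionFor(slot))
	}
}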
@@ -2,77 +2,42 @@ package validator

import (
"context"
"fmt"

"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition/interop"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
synccontribution "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/attestation/aggregation/sync_contribution"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"go.opencensus.io/trace"
)

func (vs *Server) BuildAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.BuildAltairBeaconBlock")
defer span.End()
blkData, err := vs.buildPhase0BlockData(ctx, req)
if err != nil {
return nil, fmt.Errorf("could not build block data: %v", err)
func (vs *Server) setSyncAggregate(ctx context.Context, blk interfaces.BeaconBlock) {
if blk.Version() < version.Altair {
return
}

// Use zero hash as stub for state root to compute later.
stateRoot := params.BeaconConfig().ZeroHash[:]

// No need for safe sub as req.Slot cannot be 0 if requesting Altair blocks. If 0, we will be throwing
// an error in the first validity check of this endpoint.
syncAggregate, err := vs.getSyncAggregate(ctx, req.Slot-1, bytesutil.ToBytes32(blkData.ParentRoot))
syncAggregate, err := vs.getSyncAggregate(ctx, blk.Slot()-1, blk.ParentRoot())
if err != nil {
return nil, err
log.WithError(err).Error("Could not get sync aggregate")
emptySig := [96]byte{0xC0}
emptyAggregate := &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, params.BeaconConfig().SyncCommitteeSize),
SyncCommitteeSignature: emptySig[:],
}
if err := blk.Body().SetSyncAggregate(emptyAggregate); err != nil {
log.WithError(err).Error("Could not set sync aggregate")
}
return
}

return &ethpb.BeaconBlockAltair{
Slot: req.Slot,
ParentRoot: blkData.ParentRoot,
StateRoot: stateRoot,
ProposerIndex: blkData.ProposerIdx,
Body: &ethpb.BeaconBlockBodyAltair{
Eth1Data: blkData.Eth1Data,
Deposits: blkData.Deposits,
Attestations: blkData.Attestations,
RandaoReveal: req.RandaoReveal,
ProposerSlashings: blkData.ProposerSlashings,
AttesterSlashings: blkData.AttesterSlashings,
VoluntaryExits: blkData.VoluntaryExits,
Graffiti: blkData.Graffiti[:],
SyncAggregate: syncAggregate,
},
}, nil
}

func (vs *Server) getAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.getAltairBeaconBlock")
defer span.End()
blk, err := vs.BuildAltairBeaconBlock(ctx, req)
if err != nil {
return nil, fmt.Errorf("could not build block data: %v", err)
// Can not error. We already filter block versioning at the top. Phase 0 is impossible.
if err := blk.Body().SetSyncAggregate(syncAggregate); err != nil {
log.WithError(err).Error("Could not set sync aggregate")
}
// Compute state root with the newly constructed block.
wsb, err := blocks.NewSignedBeaconBlock(
&ethpb.SignedBeaconBlockAltair{Block: blk, Signature: make([]byte, 96)},
)
if err != nil {
return nil, err
}
stateRoot, err := vs.computeStateRoot(ctx, wsb)
if err != nil {
interop.WriteBlockToDisk(wsb, true /*failed*/)
return nil, fmt.Errorf("could not compute state root: %v", err)
}
blk.StateRoot = stateRoot
return blk, nil
}

// getSyncAggregate retrieves the sync contributions from the pool to construct the sync aggregate object.
@@ -81,6 +46,9 @@ func (vs *Server) getSyncAggregate(ctx context.Context, slot types.Slot, root [3
ctx, span := trace.StartSpan(ctx, "ProposerServer.getSyncAggregate")
defer span.End()

if vs.SyncCommitteePool == nil {
return nil, errors.New("sync committee pool is nil")
}
// Contributions have to match the input root
contributions, err := vs.SyncCommitteePool.SyncCommitteeContributions(slot)
if err != nil {
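When no contributions are available, setSyncAggregate falls back to an empty aggregate whose signature is the BLS point-at-infinity, which begins with the byte 0xC0. A sketch of building that placeholder (the struct below is a stand-in for the protobuf type, and the bits field is shown as a packed 512-bit vector for illustration):

package main

import "fmt"

const syncCommitteeSize = 512 // mainnet SYNC_COMMITTEE_SIZE

type syncAggregate struct {
	bits [syncCommitteeSize / 8]byte // packed bitvector, one bit per member, all zero
	sig  [96]byte                    // BLS signature bytes
}

// emptySyncAggregate mirrors the fallback in setSyncAggregate: no
// participation bits set and the point-at-infinity signature (0xC0...).
func emptySyncAggregate() syncAggregate {
	var agg syncAggregate
	agg.sig[0] = 0xC0
	return agg
}

func main() {
	agg := emptySyncAggregate()
	fmt.Printf("first sig byte: %#x, bits bytes: %d\n", agg.sig[0], len(agg.bits))
}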
@@ -0,0 +1,28 @@
package validator

import (
"context"
"testing"

"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
)

func TestServer_SetSyncAggregate_EmptyCase(t *testing.T) {
b, err := blocks.NewBeaconBlock(util.NewBeaconBlockAltair().Block)
require.NoError(t, err)
s := &Server{} // Server is not initialized with sync committee pool.
s.setSyncAggregate(context.Background(), b)
agg, err := b.Body().SyncAggregate()
require.NoError(t, err)

emptySig := [96]byte{0xC0}
want := &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, params.BeaconConfig().SyncCommitteeSize),
SyncCommitteeSignature: emptySig[:],
}
require.DeepEqual(t, want, agg)
}
@@ -11,7 +11,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition/interop"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
@@ -35,73 +35,41 @@ var builderGetPayloadMissCount = promauto.NewCounter(prometheus.CounterOpts{
// block request. This value is known as `BUILDER_PROPOSAL_DELAY_TOLERANCE` in builder spec.
const blockBuilderTimeout = 1 * time.Second

func (vs *Server) getBellatrixBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {
altairBlk, err := vs.BuildAltairBeaconBlock(ctx, req)
if err != nil {
return nil, err
// Sets the execution data for the block. Execution data can come from the local EL client or a remote builder, depending on validator registration and circuit breaker conditions.
func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.BeaconBlock, headState state.BeaconState) error {
idx := blk.ProposerIndex()
slot := blk.Slot()
if slots.ToEpoch(slot) < params.BeaconConfig().BellatrixForkEpoch {
return nil
}

if !req.SkipMevBoost {
registered, err := vs.validatorRegistered(ctx, altairBlk.ProposerIndex)
if registered && err == nil {
builderReady, b, err := vs.GetAndBuildBlindBlock(ctx, altairBlk)
if err != nil {
// In the event of an error, the node should fall back to default execution engine for building block.
log.WithError(err).Error("Failed to build a block from external builder, falling " +
"back to local execution client")
builderGetPayloadMissCount.Inc()
} else if builderReady {
return b, nil
canUseBuilder, err := vs.canUseBuilder(ctx, slot, idx)
if err != nil {
log.WithError(err).Warn("Proposer: failed to check if builder can be used")
} else if canUseBuilder {
h, err := vs.getPayloadHeaderFromBuilder(ctx, slot, idx)
if err != nil {
builderGetPayloadMissCount.Inc()
log.WithError(err).Warn("Proposer: failed to get payload header from builder")
} else {
blk.SetBlinded(true)
if err := blk.Body().SetExecution(h); err != nil {
log.WithError(err).Warn("Proposer: failed to set execution payload")
} else {
return nil
}
} else if err != nil {
log.WithError(err).WithFields(logrus.Fields{
"slot": req.Slot,
"validatorIndex": altairBlk.ProposerIndex,
}).Error("Could not determine validator has registered. Defaulting to local execution client")
}
}
payload, err := vs.getExecutionPayload(ctx, req.Slot, altairBlk.ProposerIndex, bytesutil.ToBytes32(altairBlk.ParentRoot))
executionData, err := vs.getExecutionPayload(ctx, slot, idx, blk.ParentRoot(), headState)
if err != nil {
return nil, err
return errors.Wrap(err, "failed to get execution payload")
}

blk := &ethpb.BeaconBlockBellatrix{
Slot: altairBlk.Slot,
ProposerIndex: altairBlk.ProposerIndex,
ParentRoot: altairBlk.ParentRoot,
StateRoot: params.BeaconConfig().ZeroHash[:],
Body: &ethpb.BeaconBlockBodyBellatrix{
RandaoReveal: altairBlk.Body.RandaoReveal,
Eth1Data: altairBlk.Body.Eth1Data,
Graffiti: altairBlk.Body.Graffiti,
ProposerSlashings: altairBlk.Body.ProposerSlashings,
AttesterSlashings: altairBlk.Body.AttesterSlashings,
Attestations: altairBlk.Body.Attestations,
Deposits: altairBlk.Body.Deposits,
VoluntaryExits: altairBlk.Body.VoluntaryExits,
SyncAggregate: altairBlk.Body.SyncAggregate,
ExecutionPayload: payload,
},
}
// Compute state root with the newly constructed block.
wsb, err := consensusblocks.NewSignedBeaconBlock(
&ethpb.SignedBeaconBlockBellatrix{Block: blk, Signature: make([]byte, 96)},
)
if err != nil {
return nil, err
}
stateRoot, err := vs.computeStateRoot(ctx, wsb)
if err != nil {
interop.WriteBlockToDisk(wsb, true /*failed*/)
return nil, fmt.Errorf("could not compute state root: %v", err)
}
blk.StateRoot = stateRoot
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Bellatrix{Bellatrix: blk}}, nil
return blk.Body().SetExecution(executionData)
}

// This function retrieves the payload header given the slot number and the validator index.
// It's a no-op if the latest head block is not versioned bellatrix.
func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Slot, idx types.ValidatorIndex) (*enginev1.ExecutionPayloadHeader, error) {
func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Slot, idx types.ValidatorIndex) (interfaces.ExecutionData, error) {
b, err := vs.HeadFetcher.HeadBlock(ctx)
if err != nil {
return nil, err
@@ -118,6 +86,10 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
if err != nil {
return nil, err
}

ctx, cancel := context.WithTimeout(ctx, blockBuilderTimeout)
defer cancel()

bid, err := vs.BlockBuilder.GetHeader(ctx, slot, bytesutil.ToBytes32(h.BlockHash()), pk)
if err != nil {
return nil, err
@@ -161,48 +133,7 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
"builderPubKey": fmt.Sprintf("%#x", bid.Message.Pubkey),
"blockHash": fmt.Sprintf("%#x", bid.Message.Header.BlockHash),
}).Info("Received header with bid")
return bid.Message.Header, nil
}

// This function constructs the builder block given the input altair block and the header. It returns a generic beacon block for signing
func (vs *Server) buildBlindBlock(ctx context.Context, b *ethpb.BeaconBlockAltair, h *enginev1.ExecutionPayloadHeader) (*ethpb.GenericBeaconBlock, error) {
if b == nil || b.Body == nil {
return nil, errors.New("nil block")
}
if h == nil {
return nil, errors.New("nil header")
}

blk := &ethpb.BlindedBeaconBlockBellatrix{
Slot: b.Slot,
ProposerIndex: b.ProposerIndex,
ParentRoot: b.ParentRoot,
StateRoot: params.BeaconConfig().ZeroHash[:],
Body: &ethpb.BlindedBeaconBlockBodyBellatrix{
RandaoReveal: b.Body.RandaoReveal,
Eth1Data: b.Body.Eth1Data,
Graffiti: b.Body.Graffiti,
ProposerSlashings: b.Body.ProposerSlashings,
AttesterSlashings: b.Body.AttesterSlashings,
Attestations: b.Body.Attestations,
Deposits: b.Body.Deposits,
VoluntaryExits: b.Body.VoluntaryExits,
SyncAggregate: b.Body.SyncAggregate,
ExecutionPayloadHeader: h,
},
}
wsb, err := consensusblocks.NewSignedBeaconBlock(
&ethpb.SignedBlindedBeaconBlockBellatrix{Block: blk, Signature: make([]byte, 96)},
)
if err != nil {
return nil, err
}
stateRoot, err := vs.computeStateRoot(ctx, wsb)
if err != nil {
return nil, errors.Wrap(err, "could not compute state root")
}
blk.StateRoot = stateRoot
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: blk}}, nil
return consensusblocks.WrappedExecutionPayloadHeader(bid.Message.Header)
}

// This function retrieves the full payload block using the input blind block. This input must be versioned as
@@ -316,68 +247,6 @@ func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.SignedBe
return wb, nil
}

// readyForBuilder returns true if builder is allowed to be used. Builder is only allowed to be used after the
// first finalized checkpt has been execution-enabled.
func (vs *Server) readyForBuilder(ctx context.Context) (bool, error) {
cp := vs.FinalizationFetcher.FinalizedCheckpt()
// Checkpoint root is zero means we are still at genesis epoch.
if bytesutil.ToBytes32(cp.Root) == params.BeaconConfig().ZeroHash {
return false, nil
}
b, err := vs.BeaconDB.Block(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
return false, err
}
if err = consensusblocks.BeaconBlockIsNil(b); err != nil {
return false, err
}
return blocks.IsExecutionBlock(b.Block().Body())
}

// GetAndBuildBlindBlock builds blind block from builder network. Returns a boolean status, built block and error.
// If the status is false, that means the builder header block is disallowed.
// This routine is time limited by `blockBuilderTimeout`.
func (vs *Server) GetAndBuildBlindBlock(ctx context.Context, b *ethpb.BeaconBlockAltair) (bool, *ethpb.GenericBeaconBlock, error) {
// No op. Builder is not defined. User did not specify a user URL. We should use local EE.
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
return false, nil, nil
}
ctx, cancel := context.WithTimeout(ctx, blockBuilderTimeout)
defer cancel()
// Does the protocol allow for builder at this current moment. Builder is only allowed post merge after finalization.
ready, err := vs.readyForBuilder(ctx)
if err != nil {
return false, nil, errors.Wrap(err, "could not determine if builder is ready")
}
if !ready {
return false, nil, nil
}

circuitBreak, err := vs.circuitBreakBuilder(b.Slot)
if err != nil {
return false, nil, errors.Wrap(err, "could not determine if builder circuit breaker condition")
}
if circuitBreak {
return false, nil, nil
}

h, err := vs.getPayloadHeaderFromBuilder(ctx, b.Slot, b.ProposerIndex)
if err != nil {
return false, nil, errors.Wrap(err, "could not get payload header")
}
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", h.BlockHash),
"feeRecipient": fmt.Sprintf("%#x", h.FeeRecipient),
"gasUsed": h.GasUsed,
"slot": b.Slot,
}).Info("Retrieved header from builder")
gb, err := vs.buildBlindBlock(ctx, b, h)
if err != nil {
return false, nil, errors.Wrap(err, "could not combine altair block with payload header")
}
return true, gb, nil
}

// Validates builder signature and returns an error if the signature is invalid.
func validateBuilderSignature(bid *ethpb.SignedBuilderBid) error {
d, err := signing.ComputeDomain(params.BeaconConfig().DomainApplicationBuilder,
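setExecutionData now concentrates the builder-versus-local decision: the builder is tried only when allowed, and any builder failure falls through to the local engine payload. A compact sketch of that fallback control flow (the fetcher functions below are placeholders, not Prysm calls):

package main

import (
	"errors"
	"fmt"
)

type payload struct{ source string }

func fromBuilder() (payload, error)     { return payload{}, errors.New("builder timeout") }
func fromLocalEngine() (payload, error) { return payload{source: "local"}, nil }

// chooseExecutionData mirrors setExecutionData's shape: a builder miss is
// logged and counted, never fatal; the local engine is the final word.
func chooseExecutionData(canUseBuilder bool) (payload, error) {
	if canUseBuilder {
		p, err := fromBuilder()
		if err == nil {
			return p, nil
		}
		fmt.Println("builder miss, falling back:", err)
	}
	return fromLocalEngine()
}

func main() {
	p, err := chooseExecutionData(true)
	fmt.Println(p.source, err)
}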
@@ -5,30 +5,14 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
blockchainTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
|
||||
builderTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/builder/testing"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
|
||||
prysmtime "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
|
||||
dbTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync/initial-sync/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
|
||||
@@ -37,59 +21,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestServer_buildHeaderBlock(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
beaconState, keys := util.DeterministicGenesisStateAltair(t, 16384)
|
||||
sCom, err := altair.NextSyncCommittee(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(sCom))
|
||||
copiedState := beaconState.Copy()
|
||||
|
||||
proposerServer := &Server{
|
||||
BeaconDB: db,
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
}
|
||||
b, err := util.GenerateFullBlockAltair(copiedState, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
r := bytesutil.ToBytes32(b.Block.ParentRoot)
|
||||
util.SaveBlock(t, ctx, proposerServer.BeaconDB, b)
|
||||
require.NoError(t, proposerServer.BeaconDB.SaveState(ctx, beaconState, r))
|
||||
|
||||
b1, err := util.GenerateFullBlockAltair(copiedState, keys, util.DefaultBlockGenConfig(), 2)
|
||||
require.NoError(t, err)
|
||||
|
||||
vs := &Server{StateGen: stategen.New(db, doublylinkedtree.New()), BeaconDB: db}
|
||||
h := &v1.ExecutionPayloadHeader{
|
||||
BlockNumber: 123,
|
||||
GasLimit: 456,
|
||||
GasUsed: 789,
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
}
|
||||
got, err := vs.buildBlindBlock(ctx, b1.Block, h)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, h, got.GetBlindedBellatrix().Body.ExecutionPayloadHeader)
|
||||
|
||||
_, err = vs.buildBlindBlock(ctx, nil, h)
|
||||
require.ErrorContains(t, "nil block", err)
|
||||
|
||||
_, err = vs.buildBlindBlock(ctx, b1.Block, nil)
|
||||
require.ErrorContains(t, "nil header", err)
|
||||
}
|
||||
|
||||
func TestServer_getPayloadHeader(t *testing.T) {
    emptyRoot, err := ssz.TransactionsRoot([][]byte{})
    require.NoError(t, err)
@@ -225,7 +158,11 @@ func TestServer_getPayloadHeader(t *testing.T) {
            require.ErrorContains(t, tc.err, err)
        } else {
            require.NoError(t, err)
-           require.DeepEqual(t, tc.returnedHeader, h)
+           if tc.returnedHeader != nil {
+               want, err := blocks.WrappedExecutionPayloadHeader(tc.returnedHeader)
+               require.NoError(t, err)
+               require.DeepEqual(t, want, h)
+           }
        }
    })
}
@@ -370,521 +307,6 @@ func TestServer_getBuilderBlock(t *testing.T) {
    }
}

func TestServer_readyForBuilder(t *testing.T) {
    ctx := context.Background()
    vs := &Server{BeaconDB: dbTest.SetupDB(t)}
    cs := &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{}} // Checkpoint root is zeros.
    vs.FinalizationFetcher = cs
    ready, err := vs.readyForBuilder(ctx)
    require.NoError(t, err)
    require.Equal(t, false, ready)

    b := util.NewBeaconBlockBellatrix()
    wb, err := blocks.NewSignedBeaconBlock(b)
    require.NoError(t, err)
    wbr, err := wb.Block().HashTreeRoot()
    require.NoError(t, err)

    b1 := util.NewBeaconBlockBellatrix()
    b1.Block.Body.ExecutionPayload.BlockNumber = 1 // Execution enabled.
    wb1, err := blocks.NewSignedBeaconBlock(b1)
    require.NoError(t, err)
    wbr1, err := wb1.Block().HashTreeRoot()
    require.NoError(t, err)
    require.NoError(t, vs.BeaconDB.SaveBlock(ctx, wb))
    require.NoError(t, vs.BeaconDB.SaveBlock(ctx, wb1))

    // Ready is false given the finalized block does not have execution.
    cs = &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: wbr[:]}}
    vs.FinalizationFetcher = cs
    ready, err = vs.readyForBuilder(ctx)
    require.NoError(t, err)
    require.Equal(t, false, ready)

    // Ready is true given the finalized block has execution.
    cs = &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: wbr1[:]}}
    vs.FinalizationFetcher = cs
    ready, err = vs.readyForBuilder(ctx)
    require.NoError(t, err)
    require.Equal(t, true, ready)
}

func TestServer_getAndBuildHeaderBlock(t *testing.T) {
    ctx := context.Background()
    vs := &Server{}

    // Nil builder
    ready, _, err := vs.GetAndBuildBlindBlock(ctx, nil)
    require.NoError(t, err)
    require.Equal(t, false, ready)

    // Not configured
    vs.BlockBuilder = &builderTest.MockBuilderService{}
    ready, _, err = vs.GetAndBuildBlindBlock(ctx, nil)
    require.NoError(t, err)
    require.Equal(t, false, ready)

    // Block is not ready
    vs.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true}
    vs.FinalizationFetcher = &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{}}
    ready, _, err = vs.GetAndBuildBlindBlock(ctx, nil)
    require.NoError(t, err)
    require.Equal(t, false, ready)

    // Failed to get header
    b1 := util.NewBeaconBlockBellatrix()
    b1.Block.Body.ExecutionPayload.BlockNumber = 1 // Execution enabled.
    wb1, err := blocks.NewSignedBeaconBlock(b1)
    require.NoError(t, err)
    wbr1, err := wb1.Block().HashTreeRoot()
    require.NoError(t, err)
    vs.BeaconDB = dbTest.SetupDB(t)
    require.NoError(t, vs.BeaconDB.SaveBlock(ctx, wb1))
    vs.FinalizationFetcher = &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: wbr1[:]}}
    vs.HeadFetcher = &blockchainTest.ChainService{Block: wb1}
    vs.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true, ErrGetHeader: errors.New("could not get payload")}
    vs.ForkFetcher = &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New()}
    ready, _, err = vs.GetAndBuildBlindBlock(ctx, &ethpb.BeaconBlockAltair{})
    require.ErrorContains(t, "could not get payload", err)
    require.Equal(t, false, ready)

    // Block built and validated!
    beaconState, keys := util.DeterministicGenesisStateAltair(t, 16384)
    sCom, err := altair.NextSyncCommittee(context.Background(), beaconState)
    require.NoError(t, err)
    require.NoError(t, beaconState.SetCurrentSyncCommittee(sCom))
    copiedState := beaconState.Copy()

    b, err := util.GenerateFullBlockAltair(copiedState, keys, util.DefaultBlockGenConfig(), 1)
    require.NoError(t, err)
    r := bytesutil.ToBytes32(b.Block.ParentRoot)
    util.SaveBlock(t, ctx, vs.BeaconDB, b)
    require.NoError(t, vs.BeaconDB.SaveState(ctx, beaconState, r))

    altairBlk, err := util.GenerateFullBlockAltair(copiedState, keys, util.DefaultBlockGenConfig(), 2)
    require.NoError(t, err)

    ts := uint64(time.Now().Unix()) + uint64(altairBlk.Block.Slot)*params.BeaconConfig().SecondsPerSlot
    h := &v1.ExecutionPayloadHeader{
        BlockNumber: 123,
        GasLimit: 456,
        GasUsed: 789,
        ParentHash: make([]byte, fieldparams.RootLength),
        FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
        StateRoot: make([]byte, fieldparams.RootLength),
        ReceiptsRoot: make([]byte, fieldparams.RootLength),
        LogsBloom: make([]byte, fieldparams.LogsBloomLength),
        PrevRandao: make([]byte, fieldparams.RootLength),
        BaseFeePerGas: make([]byte, fieldparams.RootLength),
        BlockHash: make([]byte, fieldparams.RootLength),
        TransactionsRoot: make([]byte, fieldparams.RootLength),
        Timestamp: ts,
    }

    vs.StateGen = stategen.New(vs.BeaconDB, doublylinkedtree.New())
    vs.GenesisFetcher = &blockchainTest.ChainService{}
    vs.ForkFetcher = &blockchainTest.ChainService{Fork: &ethpb.Fork{}}

    sk, err := bls.RandKey()
    require.NoError(t, err)
    bid := &ethpb.BuilderBid{
        Header: &v1.ExecutionPayloadHeader{
            BlockNumber: 123,
            GasLimit: 456,
            GasUsed: 789,
            ParentHash: make([]byte, fieldparams.RootLength),
            FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
            StateRoot: make([]byte, fieldparams.RootLength),
            ReceiptsRoot: make([]byte, fieldparams.RootLength),
            LogsBloom: make([]byte, fieldparams.LogsBloomLength),
            PrevRandao: make([]byte, fieldparams.RootLength),
            BaseFeePerGas: make([]byte, fieldparams.RootLength),
            BlockHash: make([]byte, fieldparams.RootLength),
            TransactionsRoot: make([]byte, fieldparams.RootLength),
            Timestamp: ts,
        },
        Pubkey: sk.PublicKey().Marshal(),
        Value: bytesutil.PadTo([]byte{1, 2, 3}, 32),
    }
    d := params.BeaconConfig().DomainApplicationBuilder
    domain, err := signing.ComputeDomain(d, nil, nil)
    require.NoError(t, err)
    sr, err := signing.ComputeSigningRoot(bid, domain)
    require.NoError(t, err)
    sBid := &ethpb.SignedBuilderBid{
        Message: bid,
        Signature: sk.Sign(sr[:]).Marshal(),
    }
    vs.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true, Bid: sBid}
    vs.TimeFetcher = &blockchainTest.ChainService{Genesis: time.Now()}
    vs.ForkFetcher = &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New()}
    ready, builtBlk, err := vs.GetAndBuildBlindBlock(ctx, altairBlk.Block)
    require.NoError(t, err)
    require.Equal(t, true, ready)
    require.DeepEqual(t, h, builtBlk.GetBlindedBellatrix().Body.ExecutionPayloadHeader)
}

func TestServer_GetBellatrixBeaconBlock_HappyCase(t *testing.T) {
    db := dbTest.SetupDB(t)
    ctx := context.Background()
    hook := logTest.NewGlobal()

    terminalBlockHash := bytesutil.PadTo([]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 32)
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.BellatrixForkEpoch = 2
    cfg.AltairForkEpoch = 1
    cfg.TerminalBlockHash = common.BytesToHash(terminalBlockHash)
    cfg.TerminalBlockHashActivationEpoch = 2
    params.OverrideBeaconConfig(cfg)

    beaconState, privKeys := util.DeterministicGenesisState(t, 64)
    stateRoot, err := beaconState.HashTreeRoot(ctx)
    require.NoError(t, err, "Could not hash genesis state")

    genesis := consensusblocks.NewGenesisBlock(stateRoot[:])
    wsb, err := blocks.NewSignedBeaconBlock(genesis)
    require.NoError(t, err)
    require.NoError(t, db.SaveBlock(ctx, wsb), "Could not save genesis block")

    parentRoot, err := genesis.Block.HashTreeRoot()
    require.NoError(t, err, "Could not get signing root")
    require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
    require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save head block root")

    bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
    require.NoError(t, err)

    emptyPayload := &v1.ExecutionPayload{
        ParentHash: make([]byte, fieldparams.RootLength),
        FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
        StateRoot: make([]byte, fieldparams.RootLength),
        ReceiptsRoot: make([]byte, fieldparams.RootLength),
        LogsBloom: make([]byte, fieldparams.LogsBloomLength),
        PrevRandao: make([]byte, fieldparams.RootLength),
        BaseFeePerGas: make([]byte, fieldparams.RootLength),
        BlockHash: make([]byte, fieldparams.RootLength),
    }
    var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
    blk := &ethpb.SignedBeaconBlockBellatrix{
        Block: &ethpb.BeaconBlockBellatrix{
            Slot: bellatrixSlot + 1,
            ParentRoot: parentRoot[:],
            StateRoot: genesis.Block.StateRoot,
            Body: &ethpb.BeaconBlockBodyBellatrix{
                RandaoReveal: genesis.Block.Body.RandaoReveal,
                Graffiti: genesis.Block.Body.Graffiti,
                Eth1Data: genesis.Block.Body.Eth1Data,
                SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
                ExecutionPayload: emptyPayload,
            },
        },
        Signature: genesis.Signature,
    }

    blkRoot, err := blk.Block.HashTreeRoot()
    require.NoError(t, err, "Could not get signing root")
    require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
    require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save head block root")

    proposerServer := &Server{
        HeadFetcher: &blockchainTest.ChainService{State: beaconState, Root: parentRoot[:], Optimistic: false},
        TimeFetcher: &blockchainTest.ChainService{Genesis: time.Now()},
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        BlockReceiver: &blockchainTest.ChainService{},
        HeadUpdater: &blockchainTest.ChainService{},
        ChainStartFetcher: &mockExecution.Chain{},
        Eth1InfoFetcher: &mockExecution.Chain{},
        MockEth1Votes: true,
        AttPool: attestations.NewPool(),
        SlashingsPool: slashings.NewPool(),
        ExitPool: voluntaryexits.NewPool(),
        StateGen: stategen.New(db, doublylinkedtree.New()),
        SyncCommitteePool: synccommittee.NewStore(),
        ExecutionEngineCaller: &mockExecution.EngineClient{
            PayloadIDBytes: &v1.PayloadIDBytes{1},
            ExecutionPayload: emptyPayload,
        },
        BeaconDB: db,
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
        BlockBuilder: &builderTest.MockBuilderService{},
    }
    proposerServer.ProposerSlotIndexCache.SetProposerAndPayloadIDs(17, 11, [8]byte{'a'}, parentRoot)

    randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
    require.NoError(t, err)

    block, err := proposerServer.getBellatrixBeaconBlock(ctx, &ethpb.BlockRequest{
        Slot: bellatrixSlot + 1,
        RandaoReveal: randaoReveal,
    })
    require.NoError(t, err)
    bellatrixBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Bellatrix)
    require.Equal(t, true, ok)
    require.LogsContain(t, hook, "Computed state root")
    require.DeepEqual(t, emptyPayload, bellatrixBlk.Bellatrix.Body.ExecutionPayload) // Payload should be equal.
}

func TestServer_GetBellatrixBeaconBlock_LocalProgressingWithBuilderSkipped(t *testing.T) {
    db := dbTest.SetupDB(t)
    ctx := context.Background()
    hook := logTest.NewGlobal()

    terminalBlockHash := bytesutil.PadTo([]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 32)
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.BellatrixForkEpoch = 2
    cfg.AltairForkEpoch = 1
    cfg.TerminalBlockHash = common.BytesToHash(terminalBlockHash)
    cfg.TerminalBlockHashActivationEpoch = 2
    params.OverrideBeaconConfig(cfg)

    beaconState, privKeys := util.DeterministicGenesisState(t, 64)
    stateRoot, err := beaconState.HashTreeRoot(ctx)
    require.NoError(t, err, "Could not hash genesis state")

    genesis := consensusblocks.NewGenesisBlock(stateRoot[:])
    wsb, err := blocks.NewSignedBeaconBlock(genesis)
    require.NoError(t, err)
    require.NoError(t, db.SaveBlock(ctx, wsb), "Could not save genesis block")

    parentRoot, err := genesis.Block.HashTreeRoot()
    require.NoError(t, err, "Could not get signing root")
    require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
    require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save head block root")

    bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
    require.NoError(t, err)

    emptyPayload := &v1.ExecutionPayload{
        ParentHash: make([]byte, fieldparams.RootLength),
        FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
        StateRoot: make([]byte, fieldparams.RootLength),
        ReceiptsRoot: make([]byte, fieldparams.RootLength),
        LogsBloom: make([]byte, fieldparams.LogsBloomLength),
        PrevRandao: make([]byte, fieldparams.RootLength),
        BaseFeePerGas: make([]byte, fieldparams.RootLength),
        BlockHash: make([]byte, fieldparams.RootLength),
    }
    var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
    blk := &ethpb.SignedBeaconBlockBellatrix{
        Block: &ethpb.BeaconBlockBellatrix{
            Slot: bellatrixSlot + 1,
            ParentRoot: parentRoot[:],
            StateRoot: genesis.Block.StateRoot,
            Body: &ethpb.BeaconBlockBodyBellatrix{
                RandaoReveal: genesis.Block.Body.RandaoReveal,
                Graffiti: genesis.Block.Body.Graffiti,
                Eth1Data: genesis.Block.Body.Eth1Data,
                SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
                ExecutionPayload: emptyPayload,
            },
        },
        Signature: genesis.Signature,
    }

    blkRoot, err := blk.Block.HashTreeRoot()
    require.NoError(t, err, "Could not get signing root")
    require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
    require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save head block root")

    proposerServer := &Server{
        HeadFetcher: &blockchainTest.ChainService{State: beaconState, Root: parentRoot[:], Optimistic: false},
        TimeFetcher: &blockchainTest.ChainService{Genesis: time.Now()},
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        BlockReceiver: &blockchainTest.ChainService{},
        HeadUpdater: &blockchainTest.ChainService{},
        ChainStartFetcher: &mockExecution.Chain{},
        Eth1InfoFetcher: &mockExecution.Chain{},
        MockEth1Votes: true,
        AttPool: attestations.NewPool(),
        SlashingsPool: slashings.NewPool(),
        ExitPool: voluntaryexits.NewPool(),
        StateGen: stategen.New(db, doublylinkedtree.New()),
        SyncCommitteePool: synccommittee.NewStore(),
        ExecutionEngineCaller: &mockExecution.EngineClient{
            PayloadIDBytes: &v1.PayloadIDBytes{1},
            ExecutionPayload: emptyPayload,
        },
        BeaconDB: db,
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
        BlockBuilder: &builderTest.MockBuilderService{},
    }
    proposerServer.ProposerSlotIndexCache.SetProposerAndPayloadIDs(17, 11, [8]byte{'a'}, parentRoot)

    randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
    require.NoError(t, err)

    // Configure the builder; this should fail unless block production stays on the local engine.
    proposerServer.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true, ErrGetHeader: errors.New("bad")}
    block, err := proposerServer.getBellatrixBeaconBlock(ctx, &ethpb.BlockRequest{
        Slot: bellatrixSlot + 1,
        RandaoReveal: randaoReveal,
        SkipMevBoost: true,
    })
    require.NoError(t, err)
    bellatrixBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Bellatrix)
    require.Equal(t, true, ok)
    require.LogsContain(t, hook, "Computed state root")
    require.DeepEqual(t, emptyPayload, bellatrixBlk.Bellatrix.Body.ExecutionPayload) // Payload should be equal.
}

func TestServer_GetBellatrixBeaconBlock_BuilderCase(t *testing.T) {
    db := dbTest.SetupDB(t)
    ctx := context.Background()
    hook := logTest.NewGlobal()

    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.BellatrixForkEpoch = 2
    cfg.AltairForkEpoch = 1
    params.OverrideBeaconConfig(cfg)

    beaconState, privKeys := util.DeterministicGenesisState(t, 64)
    stateRoot, err := beaconState.HashTreeRoot(ctx)
    require.NoError(t, err, "Could not hash genesis state")

    genesis := consensusblocks.NewGenesisBlock(stateRoot[:])
    wsb, err := blocks.NewSignedBeaconBlock(genesis)
    require.NoError(t, err)
    require.NoError(t, db.SaveBlock(ctx, wsb), "Could not save genesis block")

    parentRoot, err := genesis.Block.HashTreeRoot()
    require.NoError(t, err, "Could not get signing root")
    require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
    require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save head block root")

    bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
    require.NoError(t, err)

    emptyPayload := &v1.ExecutionPayload{
        ParentHash: make([]byte, fieldparams.RootLength),
        FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
        StateRoot: make([]byte, fieldparams.RootLength),
        ReceiptsRoot: make([]byte, fieldparams.RootLength),
        LogsBloom: make([]byte, fieldparams.LogsBloomLength),
        PrevRandao: make([]byte, fieldparams.RootLength),
        BaseFeePerGas: make([]byte, fieldparams.RootLength),
        BlockHash: make([]byte, fieldparams.RootLength),
    }
    var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
    blk := &ethpb.SignedBeaconBlockBellatrix{
        Block: &ethpb.BeaconBlockBellatrix{
            Slot: bellatrixSlot + 1,
            ParentRoot: parentRoot[:],
            StateRoot: genesis.Block.StateRoot,
            Body: &ethpb.BeaconBlockBodyBellatrix{
                RandaoReveal: genesis.Block.Body.RandaoReveal,
                Graffiti: genesis.Block.Body.Graffiti,
                Eth1Data: genesis.Block.Body.Eth1Data,
                SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
                ExecutionPayload: emptyPayload,
            },
        },
        Signature: genesis.Signature,
    }

    blkRoot, err := blk.Block.HashTreeRoot()
    require.NoError(t, err, "Could not get signing root")
    require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
    require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save head block root")

    b1 := util.NewBeaconBlockBellatrix()
    b1.Block.Body.ExecutionPayload.BlockNumber = 1 // Execution enabled.
    wb1, err := blocks.NewSignedBeaconBlock(b1)
    require.NoError(t, err)
    wbr1, err := wb1.Block().HashTreeRoot()
    require.NoError(t, err)
    require.NoError(t, db.SaveBlock(ctx, wb1))

    random, err := helpers.RandaoMix(beaconState, prysmtime.CurrentEpoch(beaconState))
    require.NoError(t, err)

    tstamp, err := slots.ToTime(beaconState.GenesisTime(), bellatrixSlot+1)
    require.NoError(t, err)
    h := &v1.ExecutionPayloadHeader{
        BlockNumber: 123,
        GasLimit: 456,
        GasUsed: 789,
        ParentHash: make([]byte, fieldparams.RootLength),
        FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
        StateRoot: make([]byte, fieldparams.RootLength),
        ReceiptsRoot: make([]byte, fieldparams.RootLength),
        LogsBloom: make([]byte, fieldparams.LogsBloomLength),
        PrevRandao: random,
        BaseFeePerGas: make([]byte, fieldparams.RootLength),
        BlockHash: make([]byte, fieldparams.RootLength),
        TransactionsRoot: make([]byte, fieldparams.RootLength),
        ExtraData: make([]byte, 0),
        Timestamp: uint64(tstamp.Unix()),
    }

    proposerServer := &Server{
        FinalizationFetcher: &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: wbr1[:]}},
        HeadFetcher: &blockchainTest.ChainService{State: beaconState, Root: parentRoot[:], Optimistic: false, Block: wb1},
        TimeFetcher: &blockchainTest.ChainService{Genesis: time.Unix(int64(beaconState.GenesisTime()), 0)},
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        BlockReceiver: &blockchainTest.ChainService{},
        HeadUpdater: &blockchainTest.ChainService{},
        ForkFetcher: &blockchainTest.ChainService{Fork: &ethpb.Fork{}},
        GenesisFetcher: &blockchainTest.ChainService{},
        ChainStartFetcher: &mockExecution.Chain{},
        Eth1InfoFetcher: &mockExecution.Chain{},
        MockEth1Votes: true,
        AttPool: attestations.NewPool(),
        SlashingsPool: slashings.NewPool(),
        ExitPool: voluntaryexits.NewPool(),
        StateGen: stategen.New(db, doublylinkedtree.New()),
        SyncCommitteePool: synccommittee.NewStore(),
        ExecutionEngineCaller: &mockExecution.EngineClient{
            PayloadIDBytes: &v1.PayloadIDBytes{1},
            ExecutionPayload: emptyPayload,
        },
        BeaconDB: db,
    }
    sk, err := bls.RandKey()
    require.NoError(t, err)
    bid := &ethpb.BuilderBid{
        Header: h,
        Pubkey: sk.PublicKey().Marshal(),
        Value: bytesutil.PadTo([]byte{1, 2, 3}, 32),
    }
    d := params.BeaconConfig().DomainApplicationBuilder
    domain, err := signing.ComputeDomain(d, nil, nil)
    require.NoError(t, err)
    sr, err := signing.ComputeSigningRoot(bid, domain)
    require.NoError(t, err)
    sBid := &ethpb.SignedBuilderBid{
        Message: bid,
        Signature: sk.Sign(sr[:]).Marshal(),
    }
    proposerServer.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true, Bid: sBid}
    proposerServer.ForkFetcher = &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New()}
    randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
    require.NoError(t, err)

    require.NoError(t, proposerServer.BeaconDB.SaveRegistrationsByValidatorIDs(ctx, []types.ValidatorIndex{11},
        []*ethpb.ValidatorRegistrationV1{{FeeRecipient: bytesutil.PadTo([]byte{}, fieldparams.FeeRecipientLength), Pubkey: bytesutil.PadTo([]byte{}, fieldparams.BLSPubkeyLength)}}))

    params.SetupTestConfigCleanup(t)
    cfg.MaxBuilderConsecutiveMissedSlots = bellatrixSlot + 1
    cfg.MaxBuilderEpochMissedSlots = 32
    params.OverrideBeaconConfig(cfg)

    block, err := proposerServer.getBellatrixBeaconBlock(ctx, &ethpb.BlockRequest{
        Slot: bellatrixSlot + 1,
        RandaoReveal: randaoReveal,
    })
    require.NoError(t, err)
    bellatrixBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_BlindedBellatrix)
    require.Equal(t, true, ok)
    require.LogsContain(t, hook, "Computed state root")
    require.DeepEqual(t, h, bellatrixBlk.BlindedBellatrix.Body.ExecutionPayloadHeader) // Payload header should be equal.
}

func TestServer_validateBuilderSignature(t *testing.T) {
    sk, err := bls.RandKey()
    require.NoError(t, err)

@@ -58,7 +58,7 @@ func (vs *Server) circuitBreakBuilder(s types.Slot) (bool, error) {
            "currentSlot": s,
            "highestReceivedSlot": highestReceivedSlot,
            "maxConsecutiveSkipSlotsAllowed": maxConsecutiveSkipSlotsAllowed,
-       }).Warn("Builder circuit breaker activated due to missing consecutive slot")
+       }).Warn("Circuit breaker activated due to missing consecutive slot. Ignore if mev-boost is not used")
        return true, nil
    }

@@ -81,7 +81,7 @@ func (vs *Server) circuitBreakBuilder(s types.Slot) (bool, error) {
        log.WithFields(logrus.Fields{
            "totalMissed": diff,
            "maxEpochSkipSlotsAllowed": maxEpochSkipSlotsAllowed,
-       }).Warn("Builder circuit breaker activated due to missing enough slots last epoch")
+       }).Warn("Circuit breaker activated due to missing enough slots last epoch. Ignore if mev-boost is not used")
        return true, nil
    }

@@ -34,7 +34,7 @@ func TestServer_circuitBreakBuilder(t *testing.T) {
        err,
    )
    require.Equal(t, true, b)
-   require.LogsContain(t, hook, "Builder circuit breaker activated due to missing consecutive slot")
+   require.LogsContain(t, hook, "Circuit breaker activated due to missing consecutive slot. Ignore if mev-boost is not used")

    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -56,7 +56,7 @@ func TestServer_circuitBreakBuilder(t *testing.T) {
    b, err = s.circuitBreakBuilder(params.BeaconConfig().SlotsPerEpoch + 1)
    require.NoError(t, err)
    require.Equal(t, true, b)
-   require.LogsContain(t, hook, "Builder circuit breaker activated due to missing enough slots last epoch")
+   require.LogsContain(t, hook, "Circuit breaker activated due to missing enough slots last epoch. Ignore if mev-boost is not used")

    want := params.BeaconConfig().SlotsPerEpoch - params.BeaconConfig().MaxBuilderEpochMissedSlots
    for i := types.Slot(2); i <= want+2; i++ {

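The reworded warnings above belong to the builder circuit breaker, which falls back from the external builder (mev-boost) to local block production once too many slots have been missed. A minimal sketch of the two threshold checks it performs; all names here are illustrative stand-ins, not Prysm's actual fields or API:

package main

import "fmt"

// circuitBreak trips when either miss threshold is exceeded, mirroring the
// two warnings in circuitBreakBuilder. Parameters are hypothetical.
func circuitBreak(currentSlot, highestReceivedSlot, missedLastEpoch, maxConsecutive, maxPerEpoch uint64) bool {
    // Trip when too many consecutive slots passed without a received block.
    if currentSlot-highestReceivedSlot > maxConsecutive {
        fmt.Println("Circuit breaker activated due to missing consecutive slot. Ignore if mev-boost is not used")
        return true
    }
    // Trip when too many slots were missed over the previous epoch.
    if missedLastEpoch > maxPerEpoch {
        fmt.Println("Circuit breaker activated due to missing enough slots last epoch. Ignore if mev-boost is not used")
        return true
    }
    return false
}

func main() {
    fmt.Println(circuitBreak(100, 80, 0, 10, 8)) // consecutive-miss threshold exceeded: true
    fmt.Println(circuitBreak(100, 99, 9, 10, 8)) // epoch-miss threshold exceeded: true
    fmt.Println(circuitBreak(100, 99, 2, 10, 8)) // healthy: false
}
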
@@ -0,0 +1,29 @@
package validator

import (
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
    ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v3/runtime/version"
)

// setBlsToExecData sets the BLS-to-execution-change data for a block.
func (vs *Server) setBlsToExecData(blk interfaces.BeaconBlock, headState state.BeaconState) {
    if blk.Version() < version.Capella {
        return
    }
    if err := blk.Body().SetBLSToExecutionChanges([]*ethpb.SignedBLSToExecutionChange{}); err != nil {
        log.WithError(err).Error("Could not set bls to execution data in block")
        return
    }
    changes, err := vs.BLSChangesPool.BLSToExecChangesForInclusion(headState)
    if err != nil {
        log.WithError(err).Error("Could not get bls to execution changes")
        return
    }
    if err := blk.Body().SetBLSToExecutionChanges(changes); err != nil {
        log.WithError(err).Error("Could not set bls to execution changes")
        return
    }
}
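The new helper above installs an empty change list first and only overwrites it when the pool read succeeds, so a pool failure still leaves the block with a valid, empty field rather than nil. A small self-contained sketch of that set-default-then-overwrite pattern; the types and names here are illustrative only, not Prysm's:

package main

import (
    "errors"
    "fmt"
)

// pool stands in for BLSChangesPool; it may fail on read.
type pool struct {
    changes []string
    err     error
}

func (p pool) forInclusion() ([]string, error) { return p.changes, p.err }

// setChanges mirrors the pattern in setBlsToExecData: write a safe empty
// default first, then overwrite it only when the pool read succeeds.
func setChanges(body *[]string, p pool) {
    *body = []string{} // safe default: an empty, non-nil change list
    changes, err := p.forInclusion()
    if err != nil {
        fmt.Println("could not get changes:", err)
        return
    }
    *body = changes
}

func main() {
    var body []string
    setChanges(&body, pool{err: errors.New("pool unavailable")})
    fmt.Println(body == nil, len(body)) // false 0: the default survives the failure
}
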
@@ -12,11 +12,12 @@ import (
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/kv"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
    fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v3/config/params"
    consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
    payloadattribute "github.com/prysmaticlabs/prysm/v3/consensus-types/payload-attribute"
    types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
@@ -41,7 +42,7 @@ var (

// This returns the execution payload of a given slot. The function has full awareness of pre and post merge.
// The payload is computed given the respective time of merge.
-func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx types.ValidatorIndex, headRoot [32]byte) (*enginev1.ExecutionPayload, error) {
+func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx types.ValidatorIndex, headRoot [32]byte, st state.BeaconState) (interfaces.ExecutionData, error) {
    proposerID, payloadId, ok := vs.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, headRoot)
    feeRecipient := params.BeaconConfig().DefaultFeeRecipient
    recipient, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, vIdx)
@@ -71,27 +72,14 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx
        payload, err := vs.ExecutionEngineCaller.GetPayload(ctx, pid, slot)
        switch {
        case err == nil:
-           pb, err := payload.PbBellatrix()
-           if err != nil {
-               return nil, err
-           }
-           warnIfFeeRecipientDiffers(pb, feeRecipient)
-           return pb, nil
+           warnIfFeeRecipientDiffers(payload, feeRecipient)
+           return payload, nil
        case errors.Is(err, context.DeadlineExceeded):
        default:
            return nil, errors.Wrap(err, "could not get cached payload from execution client")
        }
    }

-   st, err := vs.HeadFetcher.HeadState(ctx)
-   if err != nil {
-       return nil, err
-   }
    st, err = transition.ProcessSlotsIfPossible(ctx, st, slot)
    if err != nil {
        return nil, err
    }

    var parentHash []byte
    var hasTerminalBlock bool
    mergeComplete, err := blocks.IsMergeTransitionComplete(st)
@@ -111,14 +99,14 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx
        parentHash = header.BlockHash()
    } else {
        if activationEpochNotReached(slot) {
-           return emptyPayload(), nil
+           return consensusblocks.WrappedExecutionPayload(emptyPayload())
        }
        parentHash, hasTerminalBlock, err = vs.getTerminalBlockHashIfExists(ctx, uint64(t.Unix()))
        if err != nil {
            return nil, err
        }
        if !hasTerminalBlock {
-           return emptyPayload(), nil
+           return consensusblocks.WrappedExecutionPayload(emptyPayload())
        }
    }
    payloadIDCacheMiss.Inc()
@@ -176,22 +164,18 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx
    if err != nil {
        return nil, err
    }
-   pb, err := payload.PbBellatrix()
-   if err != nil {
-       return nil, err
-   }
-   warnIfFeeRecipientDiffers(pb, feeRecipient)
-   return pb, nil
+   warnIfFeeRecipientDiffers(payload, feeRecipient)
+   return payload, nil
}

// warnIfFeeRecipientDiffers logs a warning if the fee recipient in the included payload does not
// match the requested one.
-func warnIfFeeRecipientDiffers(payload *enginev1.ExecutionPayload, feeRecipient common.Address) {
+func warnIfFeeRecipientDiffers(payload interfaces.ExecutionData, feeRecipient common.Address) {
    // Warn if the fee recipient is not the value we expect.
-   if payload != nil && !bytes.Equal(payload.FeeRecipient, feeRecipient[:]) {
+   if payload != nil && !bytes.Equal(payload.FeeRecipient(), feeRecipient[:]) {
        logrus.WithFields(logrus.Fields{
            "wantedFeeRecipient": fmt.Sprintf("%#x", feeRecipient),
-           "received": fmt.Sprintf("%#x", payload.FeeRecipient),
+           "received": fmt.Sprintf("%#x", payload.FeeRecipient()),
        }).Warn("Fee recipient address from execution client is not what was expected. " +
            "It is possible someone has compromised your client to try and take your transaction fees")
    }

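The change above makes getExecutionPayload return the interfaces.ExecutionData wrapper instead of a concrete *enginev1.ExecutionPayload, so callers such as warnIfFeeRecipientDiffers read fields through accessor methods like FeeRecipient() and can handle payloads from different forks uniformly. A hedged sketch of why a small interface helps here, using stand-in types rather than Prysm's real ones:

package main

import (
    "bytes"
    "fmt"
)

// executionData is a stand-in for the interfaces.ExecutionData wrapper:
// callers use accessors, so Bellatrix and Capella payloads look the same.
type executionData interface {
    FeeRecipient() []byte
}

type bellatrixPayload struct{ feeRecipient []byte }

func (p bellatrixPayload) FeeRecipient() []byte { return p.feeRecipient }

// warnIfDiffers mirrors warnIfFeeRecipientDiffers against the interface.
func warnIfDiffers(payload executionData, want []byte) {
    if payload != nil && !bytes.Equal(payload.FeeRecipient(), want) {
        fmt.Printf("unexpected fee recipient: got %#x, want %#x\n", payload.FeeRecipient(), want)
    }
}

func main() {
    warnIfDiffers(bellatrixPayload{feeRecipient: []byte{0xaa}}, []byte{0xbb})
}
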
@@ -124,7 +124,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
                ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
            }
            vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(tt.st.Slot(), 100, [8]byte{100}, [32]byte{'a'})
-           _, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx, [32]byte{'a'})
+           _, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx, [32]byte{'a'}, tt.st)
            if tt.errString != "" {
                require.ErrorContains(t, tt.errString, err)
            } else {
@@ -160,7 +160,7 @@ func TestServer_getExecutionPayloadContextTimeout(t *testing.T) {
    }
    vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nonTransitionSt.Slot(), 100, [8]byte{100}, [32]byte{'a'})

-   _, err = vs.getExecutionPayload(context.Background(), nonTransitionSt.Slot(), 100, [32]byte{'a'})
+   _, err = vs.getExecutionPayload(context.Background(), nonTransitionSt.Slot(), 100, [32]byte{'a'}, nonTransitionSt)
    require.NoError(t, err)
}

@@ -205,7 +205,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
        BeaconDB: beaconDB,
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
    }
-   gotPayload, err := vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{})
+   gotPayload, err := vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
    require.NoError(t, err)
    require.NotNil(t, gotPayload)

@@ -217,7 +217,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
    payload.FeeRecipient = evilRecipientAddress[:]
    vs.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()

-   gotPayload, err = vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{})
+   gotPayload, err = vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
    require.NoError(t, err)
    require.NotNil(t, gotPayload)

|
||||
package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition/interop"
|
||||
v "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// blockData required to create a beacon block.
|
||||
type blockData struct {
|
||||
ParentRoot []byte
|
||||
Graffiti [32]byte
|
||||
ProposerIdx types.ValidatorIndex
|
||||
Eth1Data *ethpb.Eth1Data
|
||||
Deposits []*ethpb.Deposit
|
||||
Attestations []*ethpb.Attestation
|
||||
ProposerSlashings []*ethpb.ProposerSlashing
|
||||
AttesterSlashings []*ethpb.AttesterSlashing
|
||||
VoluntaryExits []*ethpb.SignedVoluntaryExit
|
||||
}
|
||||
|
||||
func (vs *Server) getPhase0BeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.getPhase0BeaconBlock")
|
||||
defer span.End()
|
||||
blkData, err := vs.buildPhase0BlockData(ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not build block data: %v", err)
|
||||
}
|
||||
|
||||
// Use zero hash as stub for state root to compute later.
|
||||
stateRoot := params.BeaconConfig().ZeroHash[:]
|
||||
|
||||
blk := ðpb.BeaconBlock{
|
||||
Slot: req.Slot,
|
||||
ParentRoot: blkData.ParentRoot,
|
||||
StateRoot: stateRoot,
|
||||
ProposerIndex: blkData.ProposerIdx,
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
Eth1Data: blkData.Eth1Data,
|
||||
Deposits: blkData.Deposits,
|
||||
Attestations: blkData.Attestations,
|
||||
RandaoReveal: req.RandaoReveal,
|
||||
ProposerSlashings: blkData.ProposerSlashings,
|
||||
AttesterSlashings: blkData.AttesterSlashings,
|
||||
VoluntaryExits: blkData.VoluntaryExits,
|
||||
Graffiti: blkData.Graffiti[:],
|
||||
},
|
||||
}
|
||||
|
||||
// Compute state root with the newly constructed block.
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: blk, Signature: make([]byte, 96)})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stateRoot, err = vs.computeStateRoot(ctx, wsb)
|
||||
if err != nil {
|
||||
interop.WriteBlockToDisk(wsb, true /*failed*/)
|
||||
return nil, errors.Wrap(err, "could not compute state root")
|
||||
}
|
||||
blk.StateRoot = stateRoot
|
||||
return blk, nil
|
||||
}
|
||||
|
||||
// Build data required for creating a new beacon block, so this method can be shared across forks.
|
||||
func (vs *Server) buildPhase0BlockData(ctx context.Context, req *ethpb.BlockRequest) (*blockData, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.buildPhase0BlockData")
|
||||
defer span.End()
|
||||
|
||||
if vs.SyncChecker.Syncing() {
|
||||
return nil, fmt.Errorf("syncing to latest head, not ready to respond")
|
||||
}
|
||||
|
||||
if err := vs.HeadUpdater.UpdateHead(ctx); err != nil {
|
||||
log.WithError(err).Error("Could not process attestations and update head")
|
||||
}
|
||||
|
||||
// Retrieve the parent block as the current head of the canonical chain.
|
||||
parentRoot, err := vs.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not retrieve head root: %v", err)
|
||||
}
|
||||
|
||||
head, err := vs.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get head state %v", err)
|
||||
}
|
||||
|
||||
head, err = transition.ProcessSlotsUsingNextSlotCache(ctx, head, parentRoot, req.Slot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not advance slots to calculate proposer index: %v", err)
|
||||
}
|
||||
|
||||
eth1Data, err := vs.eth1DataMajorityVote(ctx, head)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get ETH1 data: %v", err)
|
||||
}
|
||||
|
||||
deposits, atts, err := vs.packDepositsAndAttestations(ctx, head, eth1Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
graffiti := bytesutil.ToBytes32(req.Graffiti)
|
||||
|
||||
// Calculate new proposer index.
|
||||
idx, err := helpers.BeaconProposerIndex(ctx, head)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not calculate proposer index %v", err)
|
||||
}
|
||||
|
||||
proposerSlashings := vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/)
|
||||
validProposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposerSlashings))
|
||||
for _, slashing := range proposerSlashings {
|
||||
_, err := blocks.ProcessProposerSlashing(ctx, head, slashing, v.SlashValidator)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Proposer: invalid proposer slashing")
|
||||
continue
|
||||
}
|
||||
validProposerSlashings = append(validProposerSlashings, slashing)
|
||||
}
|
||||
|
||||
attSlashings := vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/)
|
||||
validAttSlashings := make([]*ethpb.AttesterSlashing, 0, len(attSlashings))
|
||||
for _, slashing := range attSlashings {
|
||||
_, err := blocks.ProcessAttesterSlashing(ctx, head, slashing, v.SlashValidator)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Proposer: invalid attester slashing")
|
||||
continue
|
||||
}
|
||||
validAttSlashings = append(validAttSlashings, slashing)
|
||||
}
|
||||
exits := vs.ExitPool.PendingExits(head, req.Slot, false /*noLimit*/)
|
||||
validExits := make([]*ethpb.SignedVoluntaryExit, 0, len(exits))
|
||||
for _, exit := range exits {
|
||||
val, err := head.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Proposer: invalid exit")
|
||||
continue
|
||||
}
|
||||
if err := blocks.VerifyExitAndSignature(val, head.Slot(), head.Fork(), exit, head.GenesisValidatorsRoot()); err != nil {
|
||||
log.WithError(err).Warn("Proposer: invalid exit")
|
||||
continue
|
||||
}
|
||||
validExits = append(validExits, exit)
|
||||
}
|
||||
|
||||
return &blockData{
|
||||
ParentRoot: parentRoot,
|
||||
Graffiti: graffiti,
|
||||
ProposerIdx: idx,
|
||||
Eth1Data: eth1Data,
|
||||
Deposits: deposits,
|
||||
Attestations: atts,
|
||||
ProposerSlashings: validProposerSlashings,
|
||||
AttesterSlashings: validAttSlashings,
|
||||
VoluntaryExits: validExits,
|
||||
}, nil
|
||||
}
|
||||
@@ -19,14 +19,19 @@ import (
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
    coretime "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
    dbutil "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
    mockExecution "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
    doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/synccommittee"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
    mockp2p "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/testing"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/testutil"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
    state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
    mockSync "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync/initial-sync/testing"
@@ -51,6 +56,449 @@ import (
    "google.golang.org/protobuf/proto"
)

func TestServer_GetBeaconBlock_Phase0(t *testing.T) {
    db := dbutil.SetupDB(t)
    ctx := context.Background()

    beaconState, privKeys := util.DeterministicGenesisState(t, 64)
    stateRoot, err := beaconState.HashTreeRoot(ctx)
    require.NoError(t, err, "Could not hash genesis state")

    genesis := b.NewGenesisBlock(stateRoot[:])
    genBlk := &ethpb.SignedBeaconBlock{
        Block: &ethpb.BeaconBlock{
            Slot: genesis.Block.Slot,
            ParentRoot: genesis.Block.ParentRoot,
            StateRoot: genesis.Block.StateRoot,
            Body: &ethpb.BeaconBlockBody{
                RandaoReveal: genesis.Block.Body.RandaoReveal,
                Graffiti: genesis.Block.Body.Graffiti,
                Eth1Data: genesis.Block.Body.Eth1Data,
            },
        },
        Signature: genesis.Signature,
    }
    util.SaveBlock(t, ctx, db, genBlk)

    parentRoot, err := genBlk.Block.HashTreeRoot()
    require.NoError(t, err, "Could not get signing root")
    require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
    require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save head block root")

    proposerServer := getProposerServer(db, beaconState, parentRoot[:])

    randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
    require.NoError(t, err)

    graffiti := bytesutil.ToBytes32([]byte("eth2"))
    req := &ethpb.BlockRequest{
        Slot: 1,
        RandaoReveal: randaoReveal,
        Graffiti: graffiti[:],
    }
    proposerSlashings, attSlashings := injectSlashings(t, beaconState, privKeys, proposerServer)

    block, err := proposerServer.GetBeaconBlock(ctx, req)
    require.NoError(t, err)
    phase0Blk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Phase0)
    require.Equal(t, true, ok)
    assert.Equal(t, req.Slot, phase0Blk.Phase0.Slot)
    assert.DeepEqual(t, parentRoot[:], phase0Blk.Phase0.ParentRoot, "Expected block to have correct parent root")
    assert.DeepEqual(t, randaoReveal, phase0Blk.Phase0.Body.RandaoReveal, "Expected block to have correct randao reveal")
    assert.DeepEqual(t, req.Graffiti, phase0Blk.Phase0.Body.Graffiti, "Expected block to have correct Graffiti")
    assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(phase0Blk.Phase0.Body.ProposerSlashings)))
    assert.DeepEqual(t, proposerSlashings, phase0Blk.Phase0.Body.ProposerSlashings)
    assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(phase0Blk.Phase0.Body.AttesterSlashings)))
    assert.DeepEqual(t, attSlashings, phase0Blk.Phase0.Body.AttesterSlashings)
}

func TestServer_GetBeaconBlock_Altair(t *testing.T) {
    db := dbutil.SetupDB(t)
    ctx := context.Background()

    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.AltairForkEpoch = 1
    params.OverrideBeaconConfig(cfg)
    beaconState, privKeys := util.DeterministicGenesisState(t, 64)

    stateRoot, err := beaconState.HashTreeRoot(ctx)
    require.NoError(t, err, "Could not hash genesis state")

    genesis := b.NewGenesisBlock(stateRoot[:])
    util.SaveBlock(t, ctx, db, genesis)

    parentRoot, err := genesis.Block.HashTreeRoot()
    require.NoError(t, err, "Could not get signing root")
    require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
    require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save head block root")

    altairSlot, err := slots.EpochStart(params.BeaconConfig().AltairForkEpoch)
    require.NoError(t, err)

    var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
    genAltair := &ethpb.SignedBeaconBlockAltair{
        Block: &ethpb.BeaconBlockAltair{
            Slot: altairSlot + 1,
            ParentRoot: parentRoot[:],
            StateRoot: genesis.Block.StateRoot,
            Body: &ethpb.BeaconBlockBodyAltair{
                RandaoReveal: genesis.Block.Body.RandaoReveal,
                Graffiti: genesis.Block.Body.Graffiti,
                Eth1Data: genesis.Block.Body.Eth1Data,
                SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
            },
        },
        Signature: genesis.Signature,
    }

    blkRoot, err := genAltair.Block.HashTreeRoot()
    require.NoError(t, err, "Could not get signing root")
    require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
    require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save head block root")

    proposerServer := getProposerServer(db, beaconState, parentRoot[:])

    randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
    require.NoError(t, err)

    graffiti := bytesutil.ToBytes32([]byte("eth2"))
    req := &ethpb.BlockRequest{
        Slot: altairSlot + 1,
        RandaoReveal: randaoReveal,
        Graffiti: graffiti[:],
    }
    proposerSlashings, attSlashings := injectSlashings(t, beaconState, privKeys, proposerServer)

    block, err := proposerServer.GetBeaconBlock(ctx, req)
    require.NoError(t, err)
    altairBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Altair)
    require.Equal(t, true, ok)

    assert.Equal(t, req.Slot, altairBlk.Altair.Slot)
    assert.DeepEqual(t, parentRoot[:], altairBlk.Altair.ParentRoot, "Expected block to have correct parent root")
    assert.DeepEqual(t, randaoReveal, altairBlk.Altair.Body.RandaoReveal, "Expected block to have correct randao reveal")
    assert.DeepEqual(t, req.Graffiti, altairBlk.Altair.Body.Graffiti, "Expected block to have correct Graffiti")
    assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(altairBlk.Altair.Body.ProposerSlashings)))
    assert.DeepEqual(t, proposerSlashings, altairBlk.Altair.Body.ProposerSlashings)
    assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(altairBlk.Altair.Body.AttesterSlashings)))
    assert.DeepEqual(t, attSlashings, altairBlk.Altair.Body.AttesterSlashings)
}

func TestServer_GetBeaconBlock_Bellatrix(t *testing.T) {
    db := dbutil.SetupDB(t)
    ctx := context.Background()
    hook := logTest.NewGlobal()

    terminalBlockHash := bytesutil.PadTo([]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 32)
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.BellatrixForkEpoch = 2
    cfg.AltairForkEpoch = 1
    cfg.TerminalBlockHash = common.BytesToHash(terminalBlockHash)
    cfg.TerminalBlockHashActivationEpoch = 2
    params.OverrideBeaconConfig(cfg)
    beaconState, privKeys := util.DeterministicGenesisState(t, 64)

    stateRoot, err := beaconState.HashTreeRoot(ctx)
    require.NoError(t, err, "Could not hash genesis state")

    genesis := b.NewGenesisBlock(stateRoot[:])
    util.SaveBlock(t, ctx, db, genesis)

    parentRoot, err := genesis.Block.HashTreeRoot()
    require.NoError(t, err, "Could not get signing root")
    require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
    require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save head block root")

    bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
    require.NoError(t, err)

    var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
    blk := &ethpb.SignedBeaconBlockBellatrix{
        Block: &ethpb.BeaconBlockBellatrix{
            Slot: bellatrixSlot + 1,
            ParentRoot: parentRoot[:],
            StateRoot: genesis.Block.StateRoot,
            Body: &ethpb.BeaconBlockBodyBellatrix{
                RandaoReveal: genesis.Block.Body.RandaoReveal,
                Graffiti: genesis.Block.Body.Graffiti,
                Eth1Data: genesis.Block.Body.Eth1Data,
                SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
                ExecutionPayload: &enginev1.ExecutionPayload{
                    ParentHash: make([]byte, fieldparams.RootLength),
                    FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
                    StateRoot: make([]byte, fieldparams.RootLength),
                    ReceiptsRoot: make([]byte, fieldparams.RootLength),
                    LogsBloom: make([]byte, fieldparams.LogsBloomLength),
                    PrevRandao: make([]byte, fieldparams.RootLength),
                    BaseFeePerGas: make([]byte, fieldparams.RootLength),
                    BlockHash: make([]byte, fieldparams.RootLength),
                },
            },
        },
        Signature: genesis.Signature,
    }

    blkRoot, err := blk.Block.HashTreeRoot()
    require.NoError(t, err, "Could not get signing root")
    require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
    require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save head block root")

    c := mockExecution.New()
    c.HashesByHeight[0] = terminalBlockHash
    random, err := helpers.RandaoMix(beaconState, slots.ToEpoch(beaconState.Slot()))
    require.NoError(t, err)
    timeStamp, err := slots.ToTime(beaconState.GenesisTime(), bellatrixSlot+1)
    require.NoError(t, err)

    payload := &enginev1.ExecutionPayload{
        ParentHash: make([]byte, fieldparams.RootLength),
        FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
        StateRoot: make([]byte, fieldparams.RootLength),
        ReceiptsRoot: make([]byte, fieldparams.RootLength),
        LogsBloom: make([]byte, fieldparams.LogsBloomLength),
        PrevRandao: random,
        BaseFeePerGas: make([]byte, fieldparams.RootLength),
        BlockHash: make([]byte, fieldparams.RootLength),
        Transactions: make([][]byte, 0),
        ExtraData: make([]byte, 0),
        BlockNumber: 1,
        GasLimit: 2,
        GasUsed: 3,
        Timestamp: uint64(timeStamp.Unix()),
    }

    proposerServer := getProposerServer(db, beaconState, parentRoot[:])
    proposerServer.Eth1BlockFetcher = c
    proposerServer.ExecutionEngineCaller = &mockExecution.EngineClient{
        PayloadIDBytes: &enginev1.PayloadIDBytes{1},
        ExecutionPayload: payload,
    }

    randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
    require.NoError(t, err)

    graffiti := bytesutil.ToBytes32([]byte("eth2"))
    req := &ethpb.BlockRequest{
        Slot: bellatrixSlot + 1,
        RandaoReveal: randaoReveal,
        Graffiti: graffiti[:],
    }

    block, err := proposerServer.GetBeaconBlock(ctx, req)
    require.NoError(t, err)
    bellatrixBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Bellatrix)
    require.Equal(t, true, ok)

    assert.Equal(t, req.Slot, bellatrixBlk.Bellatrix.Slot)
    assert.DeepEqual(t, parentRoot[:], bellatrixBlk.Bellatrix.ParentRoot, "Expected block to have correct parent root")
    assert.DeepEqual(t, randaoReveal, bellatrixBlk.Bellatrix.Body.RandaoReveal, "Expected block to have correct randao reveal")
    assert.DeepEqual(t, req.Graffiti, bellatrixBlk.Bellatrix.Body.Graffiti, "Expected block to have correct Graffiti")

    require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
    require.DeepEqual(t, payload, bellatrixBlk.Bellatrix.Body.ExecutionPayload) // Payload should be equal.

    // Operator sets the default fee recipient to not be burned through the beacon node CLI.
    newHook := logTest.NewGlobal()
    params.SetupTestConfigCleanup(t)
    cfg = params.MinimalSpecConfig().Copy()
    cfg.DefaultFeeRecipient = common.Address{'b'}
    params.OverrideBeaconConfig(cfg)
    _, err = proposerServer.GetBeaconBlock(ctx, req)
    require.NoError(t, err)
    require.LogsDoNotContain(t, newHook, "Fee recipient is currently using the burn address")
}

func TestServer_GetBeaconBlock_Capella(t *testing.T) {
	db := dbutil.SetupDB(t)
	ctx := context.Background()
	transition.SkipSlotCache.Disable()

	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.CapellaForkEpoch = 3
	cfg.BellatrixForkEpoch = 2
	cfg.AltairForkEpoch = 1
	params.OverrideBeaconConfig(cfg)
	beaconState, privKeys := util.DeterministicGenesisState(t, 64)

	stateRoot, err := beaconState.HashTreeRoot(ctx)
	require.NoError(t, err, "Could not hash genesis state")

	genesis := b.NewGenesisBlock(stateRoot[:])
	util.SaveBlock(t, ctx, db, genesis)

	parentRoot, err := genesis.Block.HashTreeRoot()
	require.NoError(t, err, "Could not get signing root")
	require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
	require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

	capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
	require.NoError(t, err)

	var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
	blk := &ethpb.SignedBeaconBlockCapella{
		Block: &ethpb.BeaconBlockCapella{
			Slot: capellaSlot + 1,
			ParentRoot: parentRoot[:],
			StateRoot: genesis.Block.StateRoot,
			Body: &ethpb.BeaconBlockBodyCapella{
				RandaoReveal: genesis.Block.Body.RandaoReveal,
				Graffiti: genesis.Block.Body.Graffiti,
				Eth1Data: genesis.Block.Body.Eth1Data,
				SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
				ExecutionPayload: &enginev1.ExecutionPayloadCapella{
					ParentHash: make([]byte, fieldparams.RootLength),
					FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
					StateRoot: make([]byte, fieldparams.RootLength),
					ReceiptsRoot: make([]byte, fieldparams.RootLength),
					LogsBloom: make([]byte, fieldparams.LogsBloomLength),
					PrevRandao: make([]byte, fieldparams.RootLength),
					BaseFeePerGas: make([]byte, fieldparams.RootLength),
					BlockHash: make([]byte, fieldparams.RootLength),
				},
			},
		},
		Signature: genesis.Signature,
	}

	blkRoot, err := blk.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, err, "Could not get signing root")
	require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
	require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")

	random, err := helpers.RandaoMix(beaconState, slots.ToEpoch(beaconState.Slot()))
	require.NoError(t, err)
	timeStamp, err := slots.ToTime(beaconState.GenesisTime(), capellaSlot+1)
	require.NoError(t, err)
	payload := &enginev1.ExecutionPayloadCapella{
		ParentHash: make([]byte, fieldparams.RootLength),
		FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
		StateRoot: make([]byte, fieldparams.RootLength),
		ReceiptsRoot: make([]byte, fieldparams.RootLength),
		LogsBloom: make([]byte, fieldparams.LogsBloomLength),
		PrevRandao: random,
		BaseFeePerGas: make([]byte, fieldparams.RootLength),
		BlockHash: make([]byte, fieldparams.RootLength),
		Transactions: make([][]byte, 0),
		ExtraData: make([]byte, 0),
		BlockNumber: 1,
		GasLimit: 2,
		GasUsed: 3,
		Timestamp: uint64(timeStamp.Unix()),
	}

	proposerServer := getProposerServer(db, beaconState, parentRoot[:])
	proposerServer.ExecutionEngineCaller = &mockExecution.EngineClient{
		PayloadIDBytes: &enginev1.PayloadIDBytes{1},
		ExecutionPayloadCapella: payload,
	}

	randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
	require.NoError(t, err)

	graffiti := bytesutil.ToBytes32([]byte("eth2"))
	require.NoError(t, err)
	req := &ethpb.BlockRequest{
		Slot: capellaSlot + 1,
		RandaoReveal: randaoReveal,
		Graffiti: graffiti[:],
	}

	copiedState := beaconState.Copy()
	copiedState, err = transition.ProcessSlots(ctx, copiedState, capellaSlot+1)
	require.NoError(t, err)
	change, err := util.GenerateBLSToExecutionChange(copiedState, privKeys[1], 0)
	require.NoError(t, err)
	proposerServer.BLSChangesPool.InsertBLSToExecChange(change)

	got, err := proposerServer.GetBeaconBlock(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 1, len(got.GetCapella().Body.BlsToExecutionChanges))
	require.DeepEqual(t, change, got.GetCapella().Body.BlsToExecutionChanges[0])
}

func TestServer_GetBeaconBlock_Optimistic(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.BellatrixForkEpoch = 2
	cfg.AltairForkEpoch = 1
	params.OverrideBeaconConfig(cfg)

	bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
	require.NoError(t, err)

	proposerServer := &Server{
		OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
		SyncChecker: &mockSync.Sync{},
		TimeFetcher: &mock.ChainService{}}
	req := &ethpb.BlockRequest{
		Slot: bellatrixSlot + 1,
	}
	_, err = proposerServer.GetBeaconBlock(context.Background(), req)
	s, ok := status.FromError(err)
	require.Equal(t, true, ok)
	require.DeepEqual(t, codes.Unavailable, s.Code())
	require.ErrorContains(t, errOptimisticMode.Error(), err)
}
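
The optimistic-mode tests in this file pin down one contract: while the node is optimistically synced, block production must fail with codes.Unavailable and the errOptimisticMode message. A minimal sketch of a guard that satisfies these assertions, assuming the OptimisticModeFetcher interface's IsOptimistic method; the helper name guardOptimisticMode is invented here, and the production check may be shaped differently:

// guardOptimisticMode is a hypothetical helper mirroring what the tests
// above assert: optimistic sync yields gRPC Unavailable with errOptimisticMode.
func (vs *Server) guardOptimisticMode(ctx context.Context) error {
	optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
	if err != nil {
		return status.Errorf(codes.Internal, "could not check optimistic status: %v", err)
	}
	if optimistic {
		return status.Error(codes.Unavailable, errOptimisticMode.Error())
	}
	return nil
}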

func getProposerServer(db db.HeadAccessDatabase, headState state.BeaconState, headRoot []byte) *Server {
	return &Server{
		HeadFetcher: &mock.ChainService{State: headState, Root: headRoot},
		SyncChecker: &mockSync.Sync{IsSyncing: false},
		BlockReceiver: &mock.ChainService{},
		HeadUpdater: &mock.ChainService{},
		ChainStartFetcher: &mockExecution.Chain{},
		Eth1InfoFetcher: &mockExecution.Chain{},
		Eth1BlockFetcher: &mockExecution.Chain{},
		MockEth1Votes: true,
		AttPool: attestations.NewPool(),
		SlashingsPool: slashings.NewPool(),
		ExitPool: voluntaryexits.NewPool(),
		StateGen: stategen.New(db, doublylinkedtree.New()),
		SyncCommitteePool: synccommittee.NewStore(),
		OptimisticModeFetcher: &mock.ChainService{},
		TimeFetcher: &testutil.MockGenesisTimeFetcher{
			Genesis: time.Now(),
		},
		ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
		BeaconDB: db,
		BLSChangesPool: blstoexec.NewPool(),
	}
}

func injectSlashings(t *testing.T, st state.BeaconState, keys []bls.SecretKey, server *Server) ([]*ethpb.ProposerSlashing, []*ethpb.AttesterSlashing) {
	proposerSlashings := make([]*ethpb.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
	for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
		proposerSlashing, err := util.GenerateProposerSlashingForValidator(st, keys[i], i /* validator index */)
		require.NoError(t, err)
		proposerSlashings[i] = proposerSlashing
		err = server.SlashingsPool.InsertProposerSlashing(context.Background(), st, proposerSlashing)
		require.NoError(t, err)
	}

	attSlashings := make([]*ethpb.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
	for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
		attesterSlashing, err := util.GenerateAttesterSlashingForValidator(st, keys[i+params.BeaconConfig().MaxProposerSlashings], types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings) /* validator index */)
		require.NoError(t, err)
		attSlashings[i] = attesterSlashing
		err = server.SlashingsPool.InsertAttesterSlashing(context.Background(), st, attesterSlashing)
		require.NoError(t, err)
	}
	return proposerSlashings, attSlashings
}
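
Note how injectSlashings partitions validator indices so that a block can be filled to capacity without slashing any validator twice: proposer slashings take indices [0, MaxProposerSlashings) and attester slashings take the next MaxAttesterSlashings indices. A standalone sketch of that arithmetic, assuming the mainnet spec defaults of 16 and 2:

package main

import "fmt"

func main() {
	// Mainnet consensus-spec limits on slashings per block (assumed values).
	const maxProposerSlashings, maxAttesterSlashings = 16, 2

	// Mirrors injectSlashings above: disjoint index ranges per slashing kind.
	for i := 0; i < maxProposerSlashings; i++ {
		fmt.Println("proposer slashing for validator", i) // validators 0..15
	}
	for i := 0; i < maxAttesterSlashings; i++ {
		fmt.Println("attester slashing for validator", maxProposerSlashings+i) // validators 16..17
	}
}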

func TestProposer_ProposeBlock_OK(t *testing.T) {
	tests := []struct {
		name string
@@ -1914,382 +2362,6 @@ func TestProposer_DeleteAttsInPool_Aggregated(t *testing.T) {
	assert.Equal(t, 0, len(atts), "Did not delete unaggregated attestation")
}

func TestProposer_GetBeaconBlock_PreForkEpoch(t *testing.T) {
	db := dbutil.SetupDB(t)
	ctx := context.Background()

	beaconState, privKeys := util.DeterministicGenesisState(t, 64)
	stateRoot, err := beaconState.HashTreeRoot(ctx)
	require.NoError(t, err, "Could not hash genesis state")

	genesis := b.NewGenesisBlock(stateRoot[:])
	genBlk := &ethpb.SignedBeaconBlock{
		Block: &ethpb.BeaconBlock{
			Slot: genesis.Block.Slot,
			ParentRoot: genesis.Block.ParentRoot,
			StateRoot: genesis.Block.StateRoot,
			Body: &ethpb.BeaconBlockBody{
				RandaoReveal: genesis.Block.Body.RandaoReveal,
				Graffiti: genesis.Block.Body.Graffiti,
				Eth1Data: genesis.Block.Body.Eth1Data,
			},
		},
		Signature: genesis.Signature,
	}
	util.SaveBlock(t, ctx, db, genBlk)

	parentRoot, err := genBlk.Block.HashTreeRoot()
	require.NoError(t, err, "Could not get signing root")
	require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
	require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

	require.NoError(t, err, "Could not get signing root")
	require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
	require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

	proposerServer := &Server{
		HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:]},
		SyncChecker: &mockSync.Sync{IsSyncing: false},
		BlockReceiver: &mock.ChainService{},
		HeadUpdater: &mock.ChainService{},
		ChainStartFetcher: &mockExecution.Chain{},
		Eth1InfoFetcher: &mockExecution.Chain{},
		Eth1BlockFetcher: &mockExecution.Chain{},
		MockEth1Votes: true,
		AttPool: attestations.NewPool(),
		SlashingsPool: slashings.NewPool(),
		ExitPool: voluntaryexits.NewPool(),
		StateGen: stategen.New(db, doublylinkedtree.New()),
		SyncCommitteePool: synccommittee.NewStore(),
	}

	randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
	require.NoError(t, err)

	graffiti := bytesutil.ToBytes32([]byte("eth2"))
	req := &ethpb.BlockRequest{
		Slot: 1,
		RandaoReveal: randaoReveal,
		Graffiti: graffiti[:],
	}

	proposerSlashings := make([]*ethpb.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
	for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
		proposerSlashing, err := util.GenerateProposerSlashingForValidator(
			beaconState,
			privKeys[i],
			i, /* validator index */
		)
		require.NoError(t, err)
		proposerSlashings[i] = proposerSlashing
		err = proposerServer.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing)
		require.NoError(t, err)
	}

	attSlashings := make([]*ethpb.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
	for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
		attesterSlashing, err := util.GenerateAttesterSlashingForValidator(
			beaconState,
			privKeys[i+params.BeaconConfig().MaxProposerSlashings],
			types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings), /* validator index */
		)
		require.NoError(t, err)
		attSlashings[i] = attesterSlashing
		err = proposerServer.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing)
		require.NoError(t, err)
	}
	block, err := proposerServer.GetBeaconBlock(ctx, req)
	require.NoError(t, err)
	phase0Blk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Phase0)
	require.Equal(t, true, ok)

	assert.Equal(t, req.Slot, phase0Blk.Phase0.Slot)
	assert.DeepEqual(t, parentRoot[:], phase0Blk.Phase0.ParentRoot, "Expected block to have correct parent root")
	assert.DeepEqual(t, randaoReveal, phase0Blk.Phase0.Body.RandaoReveal, "Expected block to have correct randao reveal")
	assert.DeepEqual(t, req.Graffiti, phase0Blk.Phase0.Body.Graffiti, "Expected block to have correct Graffiti")
	assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(phase0Blk.Phase0.Body.ProposerSlashings)))
	assert.DeepEqual(t, proposerSlashings, phase0Blk.Phase0.Body.ProposerSlashings)
	assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(phase0Blk.Phase0.Body.AttesterSlashings)))
	assert.DeepEqual(t, attSlashings, phase0Blk.Phase0.Body.AttesterSlashings)
}

func TestProposer_GetBeaconBlock_PostForkEpoch(t *testing.T) {
	db := dbutil.SetupDB(t)
	ctx := context.Background()

	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.AltairForkEpoch = 1
	params.OverrideBeaconConfig(cfg)
	beaconState, privKeys := util.DeterministicGenesisState(t, 64)

	stateRoot, err := beaconState.HashTreeRoot(ctx)
	require.NoError(t, err, "Could not hash genesis state")

	genesis := b.NewGenesisBlock(stateRoot[:])
	util.SaveBlock(t, ctx, db, genesis)

	parentRoot, err := genesis.Block.HashTreeRoot()
	require.NoError(t, err, "Could not get signing root")
	require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
	require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

	altairSlot, err := slots.EpochStart(params.BeaconConfig().AltairForkEpoch)
	require.NoError(t, err)

	var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
	genAltair := &ethpb.SignedBeaconBlockAltair{
		Block: &ethpb.BeaconBlockAltair{
			Slot: altairSlot + 1,
			ParentRoot: parentRoot[:],
			StateRoot: genesis.Block.StateRoot,
			Body: &ethpb.BeaconBlockBodyAltair{
				RandaoReveal: genesis.Block.Body.RandaoReveal,
				Graffiti: genesis.Block.Body.Graffiti,
				Eth1Data: genesis.Block.Body.Eth1Data,
				SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
			},
		},
		Signature: genesis.Signature,
	}

	blkRoot, err := genAltair.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, err, "Could not get signing root")
	require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
	require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")

	proposerServer := &Server{
		HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:]},
		SyncChecker: &mockSync.Sync{IsSyncing: false},
		BlockReceiver: &mock.ChainService{},
		HeadUpdater: &mock.ChainService{},
		ChainStartFetcher: &mockExecution.Chain{},
		Eth1InfoFetcher: &mockExecution.Chain{},
		Eth1BlockFetcher: &mockExecution.Chain{},
		MockEth1Votes: true,
		AttPool: attestations.NewPool(),
		SlashingsPool: slashings.NewPool(),
		ExitPool: voluntaryexits.NewPool(),
		StateGen: stategen.New(db, doublylinkedtree.New()),
		SyncCommitteePool: synccommittee.NewStore(),
	}

	randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
	require.NoError(t, err)

	graffiti := bytesutil.ToBytes32([]byte("eth2"))
	require.NoError(t, err)
	req := &ethpb.BlockRequest{
		Slot: altairSlot + 1,
		RandaoReveal: randaoReveal,
		Graffiti: graffiti[:],
	}

	proposerSlashings := make([]*ethpb.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
	for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
		proposerSlashing, err := util.GenerateProposerSlashingForValidator(
			beaconState,
			privKeys[i],
			i, /* validator index */
		)
		require.NoError(t, err)
		proposerSlashings[i] = proposerSlashing
		err = proposerServer.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing)
		require.NoError(t, err)
	}

	attSlashings := make([]*ethpb.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
	for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
		attesterSlashing, err := util.GenerateAttesterSlashingForValidator(
			beaconState,
			privKeys[i+params.BeaconConfig().MaxProposerSlashings],
			types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings), /* validator index */
		)
		require.NoError(t, err)
		attSlashings[i] = attesterSlashing
		err = proposerServer.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing)
		require.NoError(t, err)
	}
	block, err := proposerServer.GetBeaconBlock(ctx, req)
	require.NoError(t, err)
	altairBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Altair)
	require.Equal(t, true, ok)

	assert.Equal(t, req.Slot, altairBlk.Altair.Slot)
	assert.DeepEqual(t, parentRoot[:], altairBlk.Altair.ParentRoot, "Expected block to have correct parent root")
	assert.DeepEqual(t, randaoReveal, altairBlk.Altair.Body.RandaoReveal, "Expected block to have correct randao reveal")
	assert.DeepEqual(t, req.Graffiti, altairBlk.Altair.Body.Graffiti, "Expected block to have correct Graffiti")
	assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(altairBlk.Altair.Body.ProposerSlashings)))
	assert.DeepEqual(t, proposerSlashings, altairBlk.Altair.Body.ProposerSlashings)
	assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(altairBlk.Altair.Body.AttesterSlashings)))
	assert.DeepEqual(t, attSlashings, altairBlk.Altair.Body.AttesterSlashings)
}

func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
	db := dbutil.SetupDB(t)
	ctx := context.Background()
	hook := logTest.NewGlobal()

	terminalBlockHash := bytesutil.PadTo([]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 32)
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.BellatrixForkEpoch = 2
	cfg.AltairForkEpoch = 1
	cfg.TerminalBlockHash = common.BytesToHash(terminalBlockHash)
	cfg.TerminalBlockHashActivationEpoch = 2
	params.OverrideBeaconConfig(cfg)
	beaconState, privKeys := util.DeterministicGenesisState(t, 64)

	stateRoot, err := beaconState.HashTreeRoot(ctx)
	require.NoError(t, err, "Could not hash genesis state")

	genesis := b.NewGenesisBlock(stateRoot[:])
	util.SaveBlock(t, ctx, db, genesis)

	parentRoot, err := genesis.Block.HashTreeRoot()
	require.NoError(t, err, "Could not get signing root")
	require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
	require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

	bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
	require.NoError(t, err)

	var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
	blk := &ethpb.SignedBeaconBlockBellatrix{
		Block: &ethpb.BeaconBlockBellatrix{
			Slot: bellatrixSlot + 1,
			ParentRoot: parentRoot[:],
			StateRoot: genesis.Block.StateRoot,
			Body: &ethpb.BeaconBlockBodyBellatrix{
				RandaoReveal: genesis.Block.Body.RandaoReveal,
				Graffiti: genesis.Block.Body.Graffiti,
				Eth1Data: genesis.Block.Body.Eth1Data,
				SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
				ExecutionPayload: &enginev1.ExecutionPayload{
					ParentHash: make([]byte, fieldparams.RootLength),
					FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
					StateRoot: make([]byte, fieldparams.RootLength),
					ReceiptsRoot: make([]byte, fieldparams.RootLength),
					LogsBloom: make([]byte, fieldparams.LogsBloomLength),
					PrevRandao: make([]byte, fieldparams.RootLength),
					BaseFeePerGas: make([]byte, fieldparams.RootLength),
					BlockHash: make([]byte, fieldparams.RootLength),
				},
			},
		},
		Signature: genesis.Signature,
	}

	blkRoot, err := blk.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, err, "Could not get signing root")
	require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
	require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")

	c := mockExecution.New()
	c.HashesByHeight[0] = terminalBlockHash
	random, err := helpers.RandaoMix(beaconState, slots.ToEpoch(beaconState.Slot()))
	require.NoError(t, err)
	timeStamp, err := slots.ToTime(beaconState.GenesisTime(), bellatrixSlot+1)
	require.NoError(t, err)

	payload := &enginev1.ExecutionPayload{
		ParentHash: make([]byte, fieldparams.RootLength),
		FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
		StateRoot: make([]byte, fieldparams.RootLength),
		ReceiptsRoot: make([]byte, fieldparams.RootLength),
		LogsBloom: make([]byte, fieldparams.LogsBloomLength),
		PrevRandao: random,
		BaseFeePerGas: make([]byte, fieldparams.RootLength),
		BlockHash: make([]byte, fieldparams.RootLength),
		Transactions: make([][]byte, 0),
		ExtraData: make([]byte, 0),
		BlockNumber: 1,
		GasLimit: 2,
		GasUsed: 3,
		Timestamp: uint64(timeStamp.Unix()),
	}
	proposerServer := &Server{
		HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:], Optimistic: false},
		TimeFetcher: &mock.ChainService{Genesis: time.Now()},
		SyncChecker: &mockSync.Sync{IsSyncing: false},
		BlockReceiver: &mock.ChainService{},
		HeadUpdater: &mock.ChainService{},
		ChainStartFetcher: &mockExecution.Chain{},
		Eth1InfoFetcher: &mockExecution.Chain{},
		Eth1BlockFetcher: c,
		MockEth1Votes: true,
		AttPool: attestations.NewPool(),
		SlashingsPool: slashings.NewPool(),
		ExitPool: voluntaryexits.NewPool(),
		StateGen: stategen.New(db, doublylinkedtree.New()),
		SyncCommitteePool: synccommittee.NewStore(),
		ExecutionEngineCaller: &mockExecution.EngineClient{
			PayloadIDBytes: &enginev1.PayloadIDBytes{1},
			ExecutionPayload: payload,
		},
		BeaconDB: db,
		ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
	}

	randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
	require.NoError(t, err)

	graffiti := bytesutil.ToBytes32([]byte("eth2"))
	require.NoError(t, err)
	req := &ethpb.BlockRequest{
		Slot: bellatrixSlot + 1,
		RandaoReveal: randaoReveal,
		Graffiti: graffiti[:],
	}

	block, err := proposerServer.GetBeaconBlock(ctx, req)
	require.NoError(t, err)
	bellatrixBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Bellatrix)
	require.Equal(t, true, ok)

	assert.Equal(t, req.Slot, bellatrixBlk.Bellatrix.Slot)
	assert.DeepEqual(t, parentRoot[:], bellatrixBlk.Bellatrix.ParentRoot, "Expected block to have correct parent root")
	assert.DeepEqual(t, randaoReveal, bellatrixBlk.Bellatrix.Body.RandaoReveal, "Expected block to have correct randao reveal")
	assert.DeepEqual(t, req.Graffiti, bellatrixBlk.Bellatrix.Body.Graffiti, "Expected block to have correct Graffiti")

	require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
	require.DeepEqual(t, payload, bellatrixBlk.Bellatrix.Body.ExecutionPayload) // Payload should equal.

	// Operator sets default fee recipient to not be burned through beacon node cli.
	newHook := logTest.NewGlobal()
	params.SetupTestConfigCleanup(t)
	cfg = params.MinimalSpecConfig().Copy()
	cfg.DefaultFeeRecipient = common.Address{'b'}
	params.OverrideBeaconConfig(cfg)
	_, err = proposerServer.GetBeaconBlock(ctx, req)
	require.NoError(t, err)
	require.LogsDoNotContain(t, newHook, "Fee recipient is currently using the burn address")
}

func TestProposer_GetBeaconBlock_Optimistic(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.BellatrixForkEpoch = 2
	cfg.AltairForkEpoch = 1
	params.OverrideBeaconConfig(cfg)

	bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
	require.NoError(t, err)

	proposerServer := &Server{OptimisticModeFetcher: &mock.ChainService{Optimistic: true}, TimeFetcher: &mock.ChainService{}}
	req := &ethpb.BlockRequest{
		Slot: bellatrixSlot + 1,
	}
	_, err = proposerServer.GetBeaconBlock(context.Background(), req)
	s, ok := status.FromError(err)
	require.Equal(t, true, ok)
	require.DeepEqual(t, codes.Unavailable, s.Code())
	require.ErrorContains(t, errOptimisticMode.Error(), err)
}

func TestProposer_GetSyncAggregate_OK(t *testing.T) {
	proposerServer := &Server{
		SyncChecker: &mockSync.Sync{IsSyncing: false},

@@ -20,6 +20,7 @@ import (
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/synccommittee"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
@@ -72,6 +73,7 @@ type Server struct {
	BeaconDB db.HeadAccessDatabase
	ExecutionEngineCaller execution.EngineCaller
	BlockBuilder builder.BlockBuilder
	BLSChangesPool blstoexec.PoolManager
}

// WaitForActivation checks if a validator public key exists in the active validator registry of the current

@@ -223,6 +223,7 @@ func (s *Service) Start() {
		BeaconDB: s.cfg.BeaconDB,
		ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
		BlockBuilder: s.cfg.BlockBuilder,
		BLSChangesPool: s.cfg.BLSChangesPool,
	}
	validatorServerV1 := &validator.Server{
		HeadFetcher: s.cfg.HeadFetcher,

@@ -14,6 +14,7 @@ go_library(
		"//consensus-types/blocks:go_default_library",
		"//consensus-types/primitives:go_default_library",
		"//encoding/bytesutil:go_default_library",
		"//time/slots:go_default_library",
		"@com_github_pkg_errors//:go_default_library",
		"@io_opencensus_go//trace:go_default_library",
	],

@@ -16,6 +16,7 @@ import (
	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v3/time/slots"
	"go.opencensus.io/trace"
)

@@ -113,13 +114,29 @@ func (p *StateProvider) State(ctx context.Context, stateId []byte) (state.Beacon
	}
	case "finalized":
		checkpoint := p.ChainInfoFetcher.FinalizedCheckpt()
		s, err = p.StateGenService.StateByRoot(ctx, bytesutil.ToBytes32(checkpoint.Root))
		targetSlot, err := slots.EpochStart(checkpoint.Epoch)
		if err != nil {
			return nil, errors.Wrap(err, "could not get start slot")
		}
		// We use the stategen replayer to fetch the finalized state and then
		// replay it to the start slot of our checkpoint's epoch. The replayer
		// only ever accesses our canonical history, so the state retrieved will
		// always be the finalized state at that epoch.
		s, err = p.ReplayerBuilder.ReplayerForSlot(targetSlot).ReplayToSlot(ctx, targetSlot)
		if err != nil {
			return nil, errors.Wrap(err, "could not get finalized state")
		}
	case "justified":
		checkpoint := p.ChainInfoFetcher.CurrentJustifiedCheckpt()
		s, err = p.StateGenService.StateByRoot(ctx, bytesutil.ToBytes32(checkpoint.Root))
		targetSlot, err := slots.EpochStart(checkpoint.Epoch)
		if err != nil {
			return nil, errors.Wrap(err, "could not get start slot")
		}
		// We use the stategen replayer to fetch the justified state and then
		// replay it to the start slot of our checkpoint's epoch. The replayer
		// only ever accesses our canonical history, so the state retrieved will
		// always be the justified state at that epoch.
		s, err = p.ReplayerBuilder.ReplayerForSlot(targetSlot).ReplayToSlot(ctx, targetSlot)
		if err != nil {
			return nil, errors.Wrap(err, "could not get justified state")
		}
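
The finalized and justified branches above are identical except for the checkpoint they start from, so the pattern can be captured in one helper. checkpointState below is a hypothetical consolidation, not part of this diff; it reuses the ReplayerBuilder calls shown above and assumes the surrounding package's imports (errors, slots, state, ethpb):

// checkpointState resolves a checkpoint to the state at the start slot of its
// epoch by replaying canonical history, exactly as both branches above do.
func (p *StateProvider) checkpointState(ctx context.Context, cp *ethpb.Checkpoint) (state.BeaconState, error) {
	targetSlot, err := slots.EpochStart(cp.Epoch)
	if err != nil {
		return nil, errors.Wrap(err, "could not get start slot")
	}
	// The replayer walks only canonical history, so this yields the
	// checkpoint state at that epoch boundary.
	return p.ReplayerBuilder.ReplayerForSlot(targetSlot).ReplayToSlot(ctx, targetSlot)
}
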
@@ -203,9 +220,6 @@ func (p *StateProvider) StateBySlot(ctx context.Context, target types.Slot) (sta
	if target > p.GenesisTimeFetcher.CurrentSlot() {
		return nil, errors.New("requested slot is in the future")
	}
	if target > p.ChainInfoFetcher.HeadSlot() {
		return nil, errors.New("requested slot number is higher than head slot number")
	}

	st, err := p.ReplayerBuilder.ReplayerForSlot(target).ReplayBlocks(ctx)
	if err != nil {

@@ -94,15 +94,19 @@ func TestGetState(t *testing.T) {

	t.Run("finalized", func(t *testing.T) {
		stateGen := mockstategen.NewMockService()
		replayer := mockstategen.NewMockReplayerBuilder()
		replayer.SetMockStateForSlot(newBeaconState, params.BeaconConfig().SlotsPerEpoch*10)
		stateGen.StatesByRoot[stateRoot] = newBeaconState

		p := StateProvider{
			ChainInfoFetcher: &chainMock.ChainService{
				FinalizedCheckPoint: &ethpb.Checkpoint{
					Root: stateRoot[:],
					Root: stateRoot[:],
					Epoch: 10,
				},
			},
			StateGenService: stateGen,
			ReplayerBuilder: replayer,
		}

		s, err := p.State(ctx, []byte("finalized"))
@@ -114,15 +118,19 @@ func TestGetState(t *testing.T) {

	t.Run("justified", func(t *testing.T) {
		stateGen := mockstategen.NewMockService()
		replayer := mockstategen.NewMockReplayerBuilder()
		replayer.SetMockStateForSlot(newBeaconState, params.BeaconConfig().SlotsPerEpoch*10)
		stateGen.StatesByRoot[stateRoot] = newBeaconState

		p := StateProvider{
			ChainInfoFetcher: &chainMock.ChainService{
				CurrentJustifiedCheckPoint: &ethpb.Checkpoint{
					Root: stateRoot[:],
					Root: stateRoot[:],
					Epoch: 10,
				},
			},
			StateGenService: stateGen,
			ReplayerBuilder: replayer,
		}

		s, err := p.State(ctx, []byte("justified"))
@@ -387,11 +395,16 @@ func TestStateBySlot_FutureSlot(t *testing.T) {
}

func TestStateBySlot_AfterHeadSlot(t *testing.T) {
	st, err := statenative.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 100})
	headSt, err := statenative.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 100})
	require.NoError(t, err)
	slotSt, err := statenative.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 101})
	require.NoError(t, err)
	currentSlot := types.Slot(102)
	mock := &chainMock.ChainService{State: st, Slot: &currentSlot}
	p := StateProvider{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
	_, err = p.StateBySlot(context.Background(), 101)
	assert.ErrorContains(t, "requested slot number is higher than head slot number", err)
	mock := &chainMock.ChainService{State: headSt, Slot: &currentSlot}
	mockReplayer := mockstategen.NewMockReplayerBuilder()
	mockReplayer.SetMockStateForSlot(slotSt, 101)
	p := StateProvider{ChainInfoFetcher: mock, GenesisTimeFetcher: mock, ReplayerBuilder: mockReplayer}
	st, err := p.StateBySlot(context.Background(), 101)
	require.NoError(t, err)
	assert.Equal(t, types.Slot(101), st.Slot())
}

@@ -18,11 +18,9 @@ import (
type BeaconState interface {
	SpecParametersProvider
	ReadOnlyBeaconState
	ReadOnlyWithdrawals
	WriteOnlyBeaconState
	Copy() BeaconState
	HashTreeRoot(ctx context.Context) ([32]byte, error)
	FutureForkStub
	StateProver
}

@@ -50,6 +48,10 @@ type ReadOnlyBeaconState interface {
	ReadOnlyBalances
	ReadOnlyCheckpoint
	ReadOnlyAttestations
	ReadOnlyWithdrawals
	ReadOnlyParticipation
	ReadOnlyInactivity
	ReadOnlySyncCommittee
	ToProtoUnsafe() interface{}
	ToProto() interface{}
	GenesisTime() uint64
@@ -57,7 +59,8 @@
	Slot() types.Slot
	Fork() *ethpb.Fork
	LatestBlockHeader() *ethpb.BeaconBlockHeader
	HistoricalRoots() [][]byte
	HistoricalRoots() ([][]byte, error)
	HistoricalSummaries() ([]*ethpb.HistoricalSummary, error)
	Slashings() []uint64
	FieldReferencesCount() map[string]uint64
	MarshalSSZ() ([]byte, error)
@@ -76,6 +79,9 @@ type WriteOnlyBeaconState interface {
	WriteOnlyBalances
	WriteOnlyCheckpoint
	WriteOnlyAttestations
	WriteOnlyParticipation
	WriteOnlyInactivity
	WriteOnlySyncCommittee
	SetGenesisTime(val uint64) error
	SetGenesisValidatorsRoot(val []byte) error
	SetSlot(val types.Slot) error
@@ -85,6 +91,7 @@
	SetSlashings(val []uint64) error
	UpdateSlashingsAtIndex(idx, val uint64) error
	AppendHistoricalRoots(root [32]byte) error
	AppendHistoricalSummaries(*ethpb.HistoricalSummary) error
	SetLatestExecutionPayloadHeader(payload interfaces.ExecutionData) error
	SetNextWithdrawalIndex(i uint64) error
	SetNextWithdrawalValidatorIndex(i types.ValidatorIndex) error
@@ -133,6 +140,7 @@ type ReadOnlyCheckpoint interface {
	FinalizedCheckpoint() *ethpb.Checkpoint
	FinalizedCheckpointEpoch() types.Epoch
	JustificationBits() bitfield.Bitvector4
	UnrealizedCheckpointBalances() (uint64, uint64, uint64, error)
}

// ReadOnlyBlockRoots defines a struct which only has read access to block roots methods.
@@ -174,6 +182,23 @@ type ReadOnlyWithdrawals interface {
	NextWithdrawalIndex() (uint64, error)
}

// ReadOnlyParticipation defines a struct which only has read access to participation methods.
type ReadOnlyParticipation interface {
	CurrentEpochParticipation() ([]byte, error)
	PreviousEpochParticipation() ([]byte, error)
}

// ReadOnlyInactivity defines a struct which only has read access to inactivity methods.
type ReadOnlyInactivity interface {
	InactivityScores() ([]uint64, error)
}

// ReadOnlySyncCommittee defines a struct which only has read access to sync committee methods.
type ReadOnlySyncCommittee interface {
	CurrentSyncCommittee() (*ethpb.SyncCommittee, error)
	NextSyncCommittee() (*ethpb.SyncCommittee, error)
}

// WriteOnlyBlockRoots defines a struct which only has write access to block roots methods.
type WriteOnlyBlockRoots interface {
	SetBlockRoots(val [][]byte) error
@@ -232,23 +257,24 @@ type WriteOnlyAttestations interface {
	RotateAttestations() error
}

// FutureForkStub defines methods that are used for future forks. This is a low cost solution to enable
// various state casting of interface to work.
type FutureForkStub interface {
// WriteOnlyParticipation defines a struct which only has write access to participation methods.
type WriteOnlyParticipation interface {
	AppendCurrentParticipationBits(val byte) error
	AppendPreviousParticipationBits(val byte) error
	AppendInactivityScore(s uint64) error
	CurrentEpochParticipation() ([]byte, error)
	PreviousEpochParticipation() ([]byte, error)
	UnrealizedCheckpointBalances() (uint64, uint64, uint64, error)
	InactivityScores() ([]uint64, error)
	SetInactivityScores(val []uint64) error
	CurrentSyncCommittee() (*ethpb.SyncCommittee, error)
	SetCurrentSyncCommittee(val *ethpb.SyncCommittee) error
	SetPreviousParticipationBits(val []byte) error
	SetCurrentParticipationBits(val []byte) error
	ModifyCurrentParticipationBits(func(val []byte) ([]byte, error)) error
	ModifyPreviousParticipationBits(func(val []byte) ([]byte, error)) error
	NextSyncCommittee() (*ethpb.SyncCommittee, error)
}

// WriteOnlyInactivity defines a struct which only has write access to inactivity methods.
type WriteOnlyInactivity interface {
	AppendInactivityScore(s uint64) error
	SetInactivityScores(val []uint64) error
}

// WriteOnlySyncCommittee defines a struct which only has write access to sync committee methods.
type WriteOnlySyncCommittee interface {
	SetCurrentSyncCommittee(val *ethpb.SyncCommittee) error
	SetNextSyncCommittee(val *ethpb.SyncCommittee) error
}
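
The upshot of dissolving FutureForkStub into these narrow read/write interfaces is that callers can declare exactly the capability they use. A hypothetical consumer, purely for illustration (countParticipating is not part of the diff):

// countParticipating needs only read access to participation bits, so it can
// accept state.ReadOnlyParticipation instead of the full BeaconState.
func countParticipating(st state.ReadOnlyParticipation) (int, error) {
	bits, err := st.CurrentEpochParticipation()
	if err != nil {
		return 0, err
	}
	n := 0
	for _, b := range bits {
		if b != 0 { // any participation flag set for this validator
			n++
		}
	}
	return n, nil
}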

@@ -27,6 +27,7 @@ type BeaconState struct {
	blockRoots *customtypes.BlockRoots
	stateRoots *customtypes.StateRoots
	historicalRoots customtypes.HistoricalRoots
	historicalSummaries []*ethpb.HistoricalSummary
	eth1Data *ethpb.Eth1Data
	eth1DataVotes []*ethpb.Eth1Data
	eth1DepositIndex uint64

@@ -27,6 +27,7 @@ type BeaconState struct {
	blockRoots *customtypes.BlockRoots
	stateRoots *customtypes.StateRoots
	historicalRoots customtypes.HistoricalRoots
	historicalSummaries []*ethpb.HistoricalSummary
	eth1Data *ethpb.Eth1Data
	eth1DataVotes []*ethpb.Eth1Data
	eth1DepositIndex uint64

@@ -3,6 +3,7 @@ package state_native
import (
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v3/runtime/version"
)

// GenesisTime of the beacon state as a uint64.
@@ -67,15 +68,15 @@ func (b *BeaconState) forkVal() *ethpb.Fork {
}

// HistoricalRoots based on epochs stored in the beacon state.
func (b *BeaconState) HistoricalRoots() [][]byte {
func (b *BeaconState) HistoricalRoots() ([][]byte, error) {
	if b.historicalRoots == nil {
		return nil
		return nil, nil
	}

	b.lock.RLock()
	defer b.lock.RUnlock()

	return b.historicalRoots.Slice()
	return b.historicalRoots.Slice(), nil
}

// balancesLength returns the length of the balances slice.
@@ -87,3 +88,25 @@ func (b *BeaconState) balancesLength() int {

	return len(b.balances)
}

// HistoricalSummaries of the beacon state.
func (b *BeaconState) HistoricalSummaries() ([]*ethpb.HistoricalSummary, error) {
	if b.version < version.Capella {
		return nil, errNotSupported("HistoricalSummaries", b.version)
	}

	if b.historicalSummaries == nil {
		return nil, nil
	}

	b.lock.RLock()
	defer b.lock.RUnlock()

	return b.historicalSummariesVal(), nil
}

// historicalSummariesVal of the beacon state.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) historicalSummariesVal() []*ethpb.HistoricalSummary {
	return ethpb.CopyHistoricalSummaries(b.historicalSummaries)
}

@@ -126,6 +126,7 @@ func (b *BeaconState) ToProtoUnsafe() interface{} {
		LatestExecutionPayloadHeader: b.latestExecutionPayloadHeaderCapella,
		NextWithdrawalIndex: b.nextWithdrawalIndex,
		NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
		HistoricalSummaries: b.historicalSummaries,
	}
	default:
		return nil
@@ -252,6 +253,7 @@ func (b *BeaconState) ToProto() interface{} {
		LatestExecutionPayloadHeader: b.latestExecutionPayloadHeaderCapellaVal(),
		NextWithdrawalIndex: b.nextWithdrawalIndex,
		NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
		HistoricalSummaries: b.historicalSummariesVal(),
	}
	default:
		return nil

@@ -255,6 +255,13 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
	nextWithdrawalValidatorIndexRoot := make([]byte, 32)
	binary.LittleEndian.PutUint64(nextWithdrawalValidatorIndexRoot, uint64(state.nextWithdrawalValidatorIndex))
	fieldRoots[nativetypes.NextWithdrawalValidatorIndex.RealPosition()] = nextWithdrawalValidatorIndexRoot

	// Historical summary root.
	historicalSummaryRoot, err := stateutil.HistoricalSummariesRoot(state.historicalSummaries)
	if err != nil {
		return nil, errors.Wrap(err, "could not compute historical summary merkleization")
	}
	fieldRoots[nativetypes.HistoricalSummaries.RealPosition()] = historicalSummaryRoot[:]
}

return fieldRoots, nil

@@ -258,6 +258,10 @@ func TestComputeFieldRootsWithHasher_Capella(t *testing.T) {
	require.NoError(t, beaconState.SetLatestExecutionPayloadHeader(wrappedHeader))
	require.NoError(t, beaconState.SetNextWithdrawalIndex(123))
	require.NoError(t, beaconState.SetNextWithdrawalValidatorIndex(123))
	require.NoError(t, beaconState.AppendHistoricalSummaries(&ethpb.HistoricalSummary{
		BlockSummaryRoot: bytesutil.PadTo([]byte("block summary root"), 32),
		StateSummaryRoot: bytesutil.PadTo([]byte("state summary root"), 32),
	}))

	nativeState, ok := beaconState.(*statenative.BeaconState)
	require.Equal(t, true, ok)
@@ -298,6 +302,7 @@ func TestComputeFieldRootsWithHasher_Capella(t *testing.T) {
	{0x39, 0x29, 0x16, 0xe8, 0x5a, 0xd2, 0xb, 0xbb, 0x1f, 0xef, 0x6a, 0xe0, 0x2d, 0xa6, 0x6a, 0x46, 0x81, 0xba, 0xcf, 0x86, 0xfc, 0x16, 0x22, 0x2a, 0x9b, 0x72, 0x96, 0x71, 0x2b, 0xc7, 0x5b, 0x9d},
	{0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
	{0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
	{0xa1, 0x4, 0x64, 0x31, 0x2a, 0xa, 0x49, 0x31, 0x1c, 0x1, 0x41, 0x17, 0xc0, 0x52, 0x52, 0xfa, 0x4c, 0xf4, 0x95, 0x4f, 0x5c, 0xb0, 0x5a, 0x40, 0xc1, 0x32, 0x39, 0xc3, 0x7c, 0xb7, 0x2c, 0x27},
}
assert.DeepEqual(t, expected, root)
}

@@ -9,6 +9,7 @@ import (
	"github.com/prysmaticlabs/prysm/v3/crypto/hash"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v3/runtime/version"
	"google.golang.org/protobuf/proto"
)

@@ -106,11 +107,15 @@ func (b *BeaconState) SetHistoricalRoots(val [][]byte) error {
}

// AppendHistoricalRoots for the beacon state. Appends the new value
// to the the end of list.
// to the end of list.
func (b *BeaconState) AppendHistoricalRoots(root [32]byte) error {
	b.lock.Lock()
	defer b.lock.Unlock()

	if b.version > version.Bellatrix {
		return errNotSupported("AppendHistoricalRoots", b.version)
	}

	roots := b.historicalRoots
	if b.sharedFieldReferences[nativetypes.HistoricalRoots].Refs() > 1 {
		roots = make([][32]byte, len(b.historicalRoots))
@@ -124,6 +129,29 @@ func (b *BeaconState) AppendHistoricalRoots(root [32]byte) error {
	return nil
}

// AppendHistoricalSummaries for the beacon state. Appends the new value
// to the end of list.
func (b *BeaconState) AppendHistoricalSummaries(summary *ethpb.HistoricalSummary) error {
	b.lock.Lock()
	defer b.lock.Unlock()

	if b.version < version.Capella {
		return errNotSupported("AppendHistoricalSummaries", b.version)
	}

	summaries := b.historicalSummaries
	if b.sharedFieldReferences[nativetypes.HistoricalSummaries].Refs() > 1 {
		summaries = make([]*ethpb.HistoricalSummary, len(b.historicalSummaries))
		copy(summaries, b.historicalSummaries)
		b.sharedFieldReferences[nativetypes.HistoricalSummaries].MinusRef()
		b.sharedFieldReferences[nativetypes.HistoricalSummaries] = stateutil.NewRef(1)
	}

	b.historicalSummaries = append(summaries, summary)
	b.markFieldAsDirty(nativetypes.HistoricalSummaries)
	return nil
}
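
The Refs()/MinusRef() dance above is the state package's copy-on-write scheme: copies of the state share one backing slice until a writer appears. A self-contained sketch of the idea with simplified types (the real counter lives in stateutil; strings stand in for *ethpb.HistoricalSummary):

package main

import "fmt"

// ref is a simplified stand-in for stateutil's shared reference counter.
type ref struct{ n int }

type histState struct {
	summaries []string
	refs      *ref
}

// copyState shares the slice and bumps the count instead of deep copying.
func (s *histState) copyState() *histState {
	s.refs.n++
	return &histState{summaries: s.summaries, refs: s.refs}
}

// appendSummary clones the slice only if another copy still references it,
// mirroring the Refs() > 1 branch in AppendHistoricalSummaries above.
func (s *histState) appendSummary(v string) {
	if s.refs.n > 1 {
		cp := make([]string, len(s.summaries))
		copy(cp, s.summaries)
		s.refs.n--          // detach from the shared slice
		s.refs = &ref{n: 1} // this copy now owns its own slice
		s.summaries = cp
	}
	s.summaries = append(s.summaries, v)
}

func main() {
	a := &histState{summaries: []string{"s0"}, refs: &ref{n: 1}}
	b := a.copyState()
	b.appendSummary("s1")
	fmt.Println(a.summaries) // [s0] (unchanged)
	fmt.Println(b.summaries) // [s0 s1]
}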

// Recomputes the branch up the index in the Merkle trie representation
// of the beacon state. This method performs slice reads and the caller MUST
// hold the lock before calling this method.

@@ -82,13 +82,14 @@ var capellaFields = append(
	nativetypes.LatestExecutionPayloadHeaderCapella,
	nativetypes.NextWithdrawalIndex,
	nativetypes.NextWithdrawalValidatorIndex,
	nativetypes.HistoricalSummaries,
)

const (
	phase0SharedFieldRefCount = 10
	altairSharedFieldRefCount = 11
	bellatrixSharedFieldRefCount = 12
	capellaSharedFieldRefCount = 13
	capellaSharedFieldRefCount = 14
)

// InitializeFromProtoPhase0 the beacon state from a protobuf representation.
@@ -433,6 +434,7 @@ func InitializeFromProtoUnsafeCapella(st *ethpb.BeaconStateCapella) (state.Beaco
	latestExecutionPayloadHeaderCapella: st.LatestExecutionPayloadHeader,
	nextWithdrawalIndex: st.NextWithdrawalIndex,
	nextWithdrawalValidatorIndex: st.NextWithdrawalValidatorIndex,
	historicalSummaries: st.HistoricalSummaries,

	dirtyFields: make(map[nativetypes.FieldIndex]bool, fieldCount),
	dirtyIndices: make(map[nativetypes.FieldIndex][]uint64, fieldCount),
@@ -466,6 +468,7 @@ func InitializeFromProtoUnsafeCapella(st *ethpb.BeaconStateCapella) (state.Beaco
	b.sharedFieldReferences[nativetypes.CurrentEpochParticipationBits] = stateutil.NewRef(1)
	b.sharedFieldReferences[nativetypes.InactivityScores] = stateutil.NewRef(1)
	b.sharedFieldReferences[nativetypes.LatestExecutionPayloadHeaderCapella] = stateutil.NewRef(1) // New in Capella.
	b.sharedFieldReferences[nativetypes.HistoricalSummaries] = stateutil.NewRef(1) // New in Capella.

	state.StateCount.Inc()
	// Finalizer runs when dst is being destroyed in garbage collection.
@@ -512,6 +515,7 @@ func (b *BeaconState) Copy() state.BeaconState {
	// Large arrays, increases over time.
	balances: b.balances,
	historicalRoots: b.historicalRoots,
	historicalSummaries: b.historicalSummaries,
	validators: b.validators,
	previousEpochParticipation: b.previousEpochParticipation,
	currentEpochParticipation: b.currentEpochParticipation,
@@ -833,6 +837,8 @@ func (b *BeaconState) rootSelector(ctx context.Context, field nativetypes.FieldI
		return ssz.Uint64Root(b.nextWithdrawalIndex), nil
	case nativetypes.NextWithdrawalValidatorIndex:
		return ssz.Uint64Root(uint64(b.nextWithdrawalValidatorIndex)), nil
	case nativetypes.HistoricalSummaries:
		return stateutil.HistoricalSummariesRoot(b.historicalSummaries)
	}
	return [32]byte{}, errors.New("invalid field index provided")
}

@@ -71,6 +71,8 @@ func (f FieldIndex) String(_ int) string {
		return "NextWithdrawalIndex"
	case NextWithdrawalValidatorIndex:
		return "NextWithdrawalValidatorIndex"
	case HistoricalSummaries:
		return "HistoricalSummaries"
	default:
		return ""
	}
@@ -134,6 +136,8 @@ func (f FieldIndex) RealPosition() int {
		return 25
	case NextWithdrawalValidatorIndex:
		return 26
	case HistoricalSummaries:
		return 27
	default:
		return -1
	}
@@ -191,4 +195,5 @@ const (
	LatestExecutionPayloadHeaderCapella
	NextWithdrawalIndex
	NextWithdrawalValidatorIndex
	HistoricalSummaries
)

@@ -9,6 +9,7 @@ go_library(
	"field_root_eth1.go",
	"field_root_validator.go",
	"field_root_vector.go",
	"historical_summaries_root.go",
	"participation_bit_root.go",
	"pending_attestation_root.go",
	"reference.go",

49
beacon-chain/state/stateutil/historical_summaries_root.go
Normal file
@@ -0,0 +1,49 @@
package stateutil

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/pkg/errors"
	fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v3/crypto/hash"
	"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)

func HistoricalSummariesRoot(summaries []*ethpb.HistoricalSummary) ([32]byte, error) {
	max := uint64(fieldparams.HistoricalRootsLength)
	if uint64(len(summaries)) > max {
		return [32]byte{}, fmt.Errorf("historical summary exceeds max length %d", max)
	}

	hasher := hash.CustomSHA256Hasher()
	roots := make([][32]byte, len(summaries))
	for i := 0; i < len(summaries); i++ {
		r, err := summaries[i].HashTreeRoot()
		if err != nil {
			return [32]byte{}, errors.Wrap(err, "could not merkleize historical summary")
		}
		roots[i] = r
	}

	summariesRoot, err := ssz.BitwiseMerkleize(
		hasher,
		roots,
		uint64(len(roots)),
		fieldparams.HistoricalRootsLength,
	)
	if err != nil {
		return [32]byte{}, errors.Wrap(err, "could not compute historical summaries merkleization")
	}
	summariesLenBuf := new(bytes.Buffer)
	if err := binary.Write(summariesLenBuf, binary.LittleEndian, uint64(len(summaries))); err != nil {
		return [32]byte{}, errors.Wrap(err, "could not marshal historical summary length")
	}
	// We need to mix in the length of the slice.
	summariesLenRoot := make([]byte, 32)
	copy(summariesLenRoot, summariesLenBuf.Bytes())
	res := ssz.MixInLength(summariesRoot, summariesLenRoot)
	return res, nil
}
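
The closing MixInLength step implements the standard SSZ rule for lists: the root of the merkleized elements is hashed together with the list length (little-endian, padded to 32 bytes), so equal contents at different lengths still hash apart. A self-contained sketch of just that step (merkleization of the elements is stubbed out):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// mixInLength mirrors what ssz.MixInLength does with the summariesLenRoot
// built above: sha256(elementsRoot || uint256_le(length)).
func mixInLength(elementsRoot [32]byte, length uint64) [32]byte {
	var lenChunk [32]byte
	binary.LittleEndian.PutUint64(lenChunk[:8], length)
	return sha256.Sum256(append(elementsRoot[:], lenChunk[:]...))
}

func main() {
	var elementsRoot [32]byte // pretend output of ssz.BitwiseMerkleize
	fmt.Printf("%x\n", mixInLength(elementsRoot, 3))
	fmt.Printf("%x\n", mixInLength(elementsRoot, 4)) // a different root
}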

@@ -4,6 +4,7 @@ go_library(
	name = "go_default_library",
	srcs = [
		"batch_verifier.go",
		"broadcast_bls_changes.go",
		"context.go",
		"deadlines.go",
		"decode_pubsub.go",
@@ -133,6 +134,7 @@ go_test(
	size = "small",
	srcs = [
		"batch_verifier_test.go",
		"broadcast_bls_changes_test.go",
		"context_test.go",
		"decode_pubsub_test.go",
		"error_test.go",

29
beacon-chain/sync/broadcast_bls_changes.go
Normal file
@@ -0,0 +1,29 @@
package sync

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/time/slots"
)

// This routine broadcasts all known BLS changes at the Capella fork.
func (s *Service) broadcastBLSChanges(currSlot types.Slot) error {
	capellaSlotStart, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
	if err != nil {
		// only possible error is an overflow, so we exit early from the method
		return nil
	}
	if currSlot == capellaSlotStart {
		changes, err := s.cfg.blsToExecPool.PendingBLSToExecChanges()
		if err != nil {
			return errors.Wrap(err, "could not get BLS to execution changes")
		}
		for _, ch := range changes {
			if err := s.cfg.p2p.Broadcast(s.ctx, ch); err != nil {
				return errors.Wrap(err, "could not broadcast BLS to execution changes.")
			}
		}
	}
	return nil
}

50
beacon-chain/sync/broadcast_bls_changes_test.go
Normal file
@@ -0,0 +1,50 @@
package sync

import (
	"context"
	"testing"
	"time"

	mockChain "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
	mockp2p "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/testing"
	mockSync "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync/initial-sync/testing"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v3/testing/require"
	"github.com/prysmaticlabs/prysm/v3/time/slots"
)

func TestBroadcastBLSChanges(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	c := params.BeaconConfig()
	c.CapellaForkEpoch = c.BellatrixForkEpoch.Add(2)
	params.OverrideBeaconConfig(c)
	chainService := &mockChain.ChainService{
		Genesis: time.Now(),
		ValidatorsRoot: [32]byte{'A'},
	}
	s := NewService(context.Background(),
		WithP2P(mockp2p.NewTestP2P(t)),
		WithInitialSync(&mockSync.Sync{IsSyncing: false}),
		WithChainService(chainService),
		WithStateNotifier(chainService.StateNotifier()),
		WithOperationNotifier(chainService.OperationNotifier()),
		WithBlsToExecPool(blstoexec.NewPool()),
	)
	var emptySig [96]byte
	s.cfg.blsToExecPool.InsertBLSToExecChange(&ethpb.SignedBLSToExecutionChange{
		Message: &ethpb.BLSToExecutionChange{
			ValidatorIndex: 10,
			FromBlsPubkey: make([]byte, 48),
			ToExecutionAddress: make([]byte, 20),
		},
		Signature: emptySig[:],
	})

	capellaStart, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
	require.NoError(t, err)
	require.NoError(t, s.broadcastBLSChanges(capellaStart))
	require.NoError(t, s.broadcastBLSChanges(capellaStart+1))
}

@@ -28,6 +28,12 @@ func (s *Service) forkWatcher() {
	log.WithError(err).Error("Unable to check for fork in the previous epoch")
	continue
}
// Broadcast BLS changes at the Capella fork boundary
if err := s.broadcastBLSChanges(currSlot); err != nil {
	log.WithError(err).Error("Unable to broadcast BLS to execution changes")
	continue
}

case <-s.ctx.Done():
	log.Debug("Context closed, exiting goroutine")
	slotTicker.Done()

@@ -18,6 +18,7 @@ import (
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v3/container/leaky-bucket"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/rand"
|
||||
"github.com/prysmaticlabs/prysm/v3/math"
|
||||
p2ppb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -77,7 +78,7 @@ type blocksFetcher struct {
|
||||
chain blockchainService
|
||||
p2p p2p.P2P
|
||||
db db.ReadOnlyDatabase
|
||||
blocksPerSecond uint64
|
||||
blocksPerPeriod uint64
|
||||
rateLimiter *leakybucket.Collector
|
||||
peerLocks map[peer.ID]*peerLock
|
||||
fetchRequests chan *fetchRequestParams
|
||||
@@ -112,11 +113,11 @@ type fetchRequestResponse struct {
|
||||
|
||||
// newBlocksFetcher creates ready to use fetcher.
|
||||
func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetcher {
|
||||
blocksPerSecond := flags.Get().BlockBatchLimit
|
||||
blocksPerPeriod := flags.Get().BlockBatchLimit
|
||||
allowedBlocksBurst := flags.Get().BlockBatchLimitBurstFactor * flags.Get().BlockBatchLimit
|
||||
// Allow fetcher to go almost to the full burst capacity (less a single batch).
|
||||
rateLimiter := leakybucket.NewCollector(
|
||||
float64(blocksPerSecond), int64(allowedBlocksBurst-blocksPerSecond),
|
||||
float64(blocksPerPeriod), int64(allowedBlocksBurst-blocksPerPeriod),
|
||||
blockLimiterPeriod, false /* deleteEmptyBuckets */)
|
||||
|
||||
capacityWeight := cfg.peerFilterCapacityWeight
|
||||
@@ -132,7 +133,7 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
 		chain:           cfg.chain,
 		p2p:             cfg.p2p,
 		db:              cfg.db,
-		blocksPerSecond: uint64(blocksPerSecond),
+		blocksPerPeriod: uint64(blocksPerPeriod),
 		rateLimiter:     rateLimiter,
 		peerLocks:       make(map[peer.ID]*peerLock),
 		fetchRequests:   make(chan *fetchRequestParams, maxPendingRequests),
@@ -323,7 +324,7 @@ func (f *blocksFetcher) requestBlocks(
 		"score": f.p2p.Peers().Scorers().BlockProviderScorer().FormatScorePretty(pid),
 	}).Debug("Requesting blocks")
 	if f.rateLimiter.Remaining(pid.String()) < int64(req.Count) {
-		if err := f.waitForBandwidth(pid); err != nil {
+		if err := f.waitForBandwidth(pid, req.Count); err != nil {
 			l.Unlock()
 			return nil, err
 		}
@@ -351,7 +352,7 @@ func (f *blocksFetcher) requestBlocksByRoot(
 		"score": f.p2p.Peers().Scorers().BlockProviderScorer().FormatScorePretty(pid),
 	}).Debug("Requesting blocks (by roots)")
 	if f.rateLimiter.Remaining(pid.String()) < int64(len(*req)) {
-		if err := f.waitForBandwidth(pid); err != nil {
+		if err := f.waitForBandwidth(pid, uint64(len(*req))); err != nil {
 			l.Unlock()
 			return nil, err
 		}
@@ -363,9 +364,19 @@ func (f *blocksFetcher) requestBlocksByRoot(
 }

 // waitForBandwidth blocks up until peer's bandwidth is restored.
-func (f *blocksFetcher) waitForBandwidth(pid peer.ID) error {
+func (f *blocksFetcher) waitForBandwidth(pid peer.ID, count uint64) error {
 	log.WithField("peer", pid).Debug("Slowing down for rate limit")
-	timer := time.NewTimer(f.rateLimiter.TillEmpty(pid.String()))
+	rem := f.rateLimiter.Remaining(pid.String())
+	if uint64(rem) >= count {
+		// Exit early if we have sufficient capacity
+		return nil
+	}
+	intCount, err := math.Int(count)
+	if err != nil {
+		return err
+	}
+	toWait := timeToWait(int64(intCount), rem, f.rateLimiter.Capacity(), f.rateLimiter.TillEmpty(pid.String()))
+	timer := time.NewTimer(toWait)
 	defer timer.Stop()
 	select {
 	case <-f.ctx.Done():
@@ -375,3 +386,18 @@ func (f *blocksFetcher) waitForBandwidth(pid peer.ID) error {
 	}
 	return nil
 }
+
+// Determine how long it will take for us to have the required number of blocks allowed by our rate limiter.
+// We do this by calculating the duration till the rate limiter can request these blocks without exceeding
+// the provided bandwidth limits per peer.
+func timeToWait(wanted, rem, capacity int64, timeTillEmpty time.Duration) time.Duration {
+	// Defensive check if we have more than enough blocks
+	// to request from the peer.
+	if rem >= wanted {
+		return 0
+	}
+	blocksNeeded := wanted - rem
+	currentNumBlks := capacity - rem
+	expectedTime := int64(timeTillEmpty) * blocksNeeded / currentNumBlks
+	return time.Duration(expectedTime)
+}
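The wait is a simple proportion: the bucket drains linearly, so if `timeTillEmpty` is how long it takes to restore all `capacity - rem` used tokens, then restoring just the `wanted - rem` tokens we are short takes the corresponding fraction of that time. Worked example, using values from the TestTimeToWait case added below: with wanted = 64, rem = 0, capacity = 640 and timeTillEmpty = 60s, the wait is 60s × 64 / 640 = 6s.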
@@ -110,7 +110,7 @@ func (f *blocksFetcher) filterPeers(ctx context.Context, peers []peer.ID, peersP
 		remaining, capacity := float64(f.rateLimiter.Remaining(peerID.String())), float64(f.rateLimiter.Capacity())
 		// When capacity is close to exhaustion, allow less performant peer to take a chance.
 		// Otherwise, there's a good chance system will be forced to wait for rate limiter.
-		if remaining < float64(f.blocksPerSecond) {
+		if remaining < float64(f.blocksPerPeriod) {
 			return 0.0
 		}
 		capScore := remaining / capacity
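The surrounding scorer (only partially visible in this hunk) blends that capacity ratio with a performance score using the `capacityWeight` configured in `newBlocksFetcher`. A minimal sketch of such a blend; the helper name, parameters, and the exact weighting are illustrative assumptions, not taken from the diff:

```go
// scorePeer blends a peer's block-provider performance score with its spare
// rate-limiter capacity, in the spirit of the filter above. Hypothetical
// helper for illustration; Prysm's actual scorer differs in detail.
func scorePeer(perfScore, remaining, capacity, blocksPerPeriod, capacityWeight float64) float64 {
	// No room left for even one more batch: filter the peer out entirely.
	if remaining < blocksPerPeriod {
		return 0.0
	}
	capScore := remaining / capacity
	return perfScore*(1.0-capacityWeight) + capScore*capacityWeight
}
```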
@@ -590,6 +590,42 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
 	require.LogsContain(t, hook, fmt.Sprintf("msg=\"Slowing down for rate limit\" peer=%s", p2.PeerID()))
 }

+func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
+	p1 := p2pt.NewTestP2P(t)
+	p2 := p2pt.NewTestP2P(t)
+	p1.Connect(p2)
+	require.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
+	req := &ethpb.BeaconBlocksByRangeRequest{
+		StartSlot: 100,
+		Step:      1,
+		Count:     64,
+	}
+
+	topic := p2pm.RPCBlocksByRangeTopicV1
+	protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
+	streamHandlerFn := func(stream network.Stream) {
+		assert.NoError(t, stream.Close())
+	}
+	p2.BHost.SetStreamHandler(protocol, streamHandlerFn)
+
+	burstFactor := uint64(flags.Get().BlockBatchLimitBurstFactor)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1})
+	fetcher.rateLimiter = leakybucket.NewCollector(float64(req.Count), int64(req.Count*burstFactor), 5*time.Second, false)
+	fetcher.chain = &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
+	start := time.Now()
+	assert.NoError(t, fetcher.waitForBandwidth(p2.PeerID(), 10))
+	dur := time.Since(start)
+	assert.Equal(t, true, dur < time.Millisecond, "waited excessively for bandwidth")
+	fetcher.rateLimiter.Add(p2.PeerID().String(), int64(req.Count*burstFactor))
+	start = time.Now()
+	assert.NoError(t, fetcher.waitForBandwidth(p2.PeerID(), req.Count))
+	dur = time.Since(start)
+	assert.Equal(t, float64(5), dur.Truncate(1*time.Second).Seconds(), "waited excessively for bandwidth")
+}
+
 func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) {
 	p1 := p2pt.NewTestP2P(t)
 	tests := []struct {
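Why the second wait comes out to five seconds (assuming the collector drains linearly): the bucket is configured to drain req.Count = 64 tokens per 5-second period with capacity 64 × burstFactor. After `Add` fills the bucket, `TillEmpty` is roughly burstFactor × 5s, and the proportional formula scales that by 64 / (64 × burstFactor), so the result is exactly one 5s period regardless of the burst factor. The first call returns immediately because a fresh bucket has its full capacity remaining, which comfortably covers the requested 10 blocks.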
@@ -860,3 +896,46 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
 		})
 	}
 }
+
+func TestTimeToWait(t *testing.T) {
+	tests := []struct {
+		name          string
+		wanted        int64
+		rem           int64
+		capacity      int64
+		timeTillEmpty time.Duration
+		want          time.Duration
+	}{
+		{
+			name:          "Limiter has sufficient blocks",
+			wanted:        64,
+			rem:           64,
+			capacity:      320,
+			timeTillEmpty: 200 * time.Second,
+			want:          0 * time.Second,
+		},
+		{
+			name:          "Limiter has reached full capacity",
+			wanted:        64,
+			rem:           0,
+			capacity:      640,
+			timeTillEmpty: 60 * time.Second,
+			want:          6 * time.Second,
+		},
+		{
+			name:          "Requesting full capacity from peer",
+			wanted:        640,
+			rem:           0,
+			capacity:      640,
+			timeTillEmpty: 60 * time.Second,
+			want:          60 * time.Second,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := timeToWait(tt.wanted, tt.rem, tt.capacity, tt.timeTillEmpty); got != tt.want {
+				t.Errorf("timeToWait() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
@@ -374,6 +374,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {

 	t.Run("no diverging blocks", func(t *testing.T) {
 		p2 := connectPeerHavingBlocks(t, p1, knownBlocks, 64, p1.Peers())
+		time.Sleep(100 * time.Millisecond)
 		defer func() {
 			assert.NoError(t, p1.Disconnect(p2))
 		}()

@@ -385,6 +386,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
 		forkedSlot := types.Slot(24)
 		altBlocks := extendBlockSequence(t, knownBlocks[:forkedSlot], 128)
 		p2 := connectPeerHavingBlocks(t, p1, altBlocks, 128, p1.Peers())
+		time.Sleep(100 * time.Millisecond)
 		defer func() {
 			assert.NoError(t, p1.Disconnect(p2))
 		}()

@@ -397,6 +399,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
 	t.Run("first block is diverging - no common ancestor", func(t *testing.T) {
 		altBlocks := extendBlockSequence(t, []*ethpb.SignedBeaconBlock{}, 128)
 		p2 := connectPeerHavingBlocks(t, p1, altBlocks, 128, p1.Peers())
+		time.Sleep(100 * time.Millisecond)
 		defer func() {
 			assert.NoError(t, p1.Disconnect(p2))
 		}()

@@ -408,6 +411,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
 		forkedSlot := types.Slot(60)
 		altBlocks := extendBlockSequence(t, knownBlocks[:forkedSlot], 128)
 		p2 := connectPeerHavingBlocks(t, p1, altBlocks, 128, p1.Peers())
+		time.Sleep(100 * time.Millisecond)
 		defer func() {
 			assert.NoError(t, p1.Disconnect(p2))
 		}()
@@ -179,7 +179,7 @@ func (q *blocksQueue) loop() {
 	if startSlot > startBackSlots {
 		startSlot -= startBackSlots
 	}
-	blocksPerRequest := q.blocksFetcher.blocksPerSecond
+	blocksPerRequest := q.blocksFetcher.blocksPerPeriod
 	for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
 		q.smm.addStateMachine(i)
 	}

@@ -294,7 +294,7 @@ func (q *blocksQueue) onScheduleEvent(ctx context.Context) eventHandlerFn {
 			m.setState(stateSkipped)
 			return m.state, errSlotIsTooHigh
 		}
-		blocksPerRequest := q.blocksFetcher.blocksPerSecond
+		blocksPerRequest := q.blocksFetcher.blocksPerPeriod
 		if err := q.blocksFetcher.scheduleRequest(ctx, m.start, blocksPerRequest); err != nil {
 			return m.state, err
 		}
@@ -849,7 +849,7 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) {
 		})

 		startSlot := queue.chain.HeadSlot()
-		blocksPerRequest := queue.blocksFetcher.blocksPerSecond
+		blocksPerRequest := queue.blocksFetcher.blocksPerPeriod
 		for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
 			queue.smm.addStateMachine(i).setState(stateSkipped)
 		}

@@ -877,7 +877,7 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) {
 		assert.Equal(t, types.Slot(blockBatchLimit), queue.highestExpectedSlot)

 		startSlot := queue.chain.HeadSlot()
-		blocksPerRequest := queue.blocksFetcher.blocksPerSecond
+		blocksPerRequest := queue.blocksFetcher.blocksPerPeriod
 		var machineSlots []types.Slot
 		for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
 			queue.smm.addStateMachine(i).setState(stateSkipped)

@@ -928,7 +928,7 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) {
 		assert.Equal(t, types.Slot(blockBatchLimit), queue.highestExpectedSlot)

 		startSlot := queue.chain.HeadSlot()
-		blocksPerRequest := queue.blocksFetcher.blocksPerSecond
+		blocksPerRequest := queue.blocksFetcher.blocksPerPeriod
 		var machineSlots []types.Slot
 		for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
 			queue.smm.addStateMachine(i).setState(stateSkipped)

@@ -1118,7 +1118,7 @@ func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) {
 	p2p.Peers().SetChainState(emptyPeer, chainState)

 	startSlot := mc.HeadSlot() + 1
-	blocksPerRequest := queue.blocksFetcher.blocksPerSecond
+	blocksPerRequest := queue.blocksFetcher.blocksPerPeriod
 	machineSlots := make([]types.Slot, 0)
 	for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
 		queue.smm.addStateMachine(i).setState(stateSkipped)

@@ -1168,7 +1168,7 @@ func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) {
 	// its claims with actual blocks.
 	forkedPeer := connectPeerHavingBlocks(t, p2p, chain2, finalizedSlot, p2p.Peers())
 	startSlot := mc.HeadSlot() + 1
-	blocksPerRequest := queue.blocksFetcher.blocksPerSecond
+	blocksPerRequest := queue.blocksFetcher.blocksPerPeriod
 	machineSlots := make([]types.Slot, 0)
 	for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
 		queue.smm.addStateMachine(i).setState(stateSkipped)
@@ -22,7 +22,7 @@ func (q *blocksQueue) resetFromFork(fork *forkData) error {
 		return errors.New("invalid first block in fork data")
 	}

-	blocksPerRequest := q.blocksFetcher.blocksPerSecond
+	blocksPerRequest := q.blocksFetcher.blocksPerPeriod
 	if err := q.smm.removeAllStateMachines(); err != nil {
 		return err
 	}

@@ -45,7 +45,7 @@ func (q *blocksQueue) resetFromFork(fork *forkData) error {
 // long periods with skipped slots).
 func (q *blocksQueue) resetFromSlot(ctx context.Context, startSlot types.Slot) error {
 	// Shift start position of all the machines except for the last one.
-	blocksPerRequest := q.blocksFetcher.blocksPerSecond
+	blocksPerRequest := q.blocksFetcher.blocksPerPeriod
 	if err := q.smm.removeAllStateMachines(); err != nil {
 		return err
 	}
@@ -6,6 +6,7 @@ import (
 	libp2pcore "github.com/libp2p/go-libp2p/core"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"

@@ -77,7 +78,11 @@ func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{
 		if blk.Block().IsBlinded() {
 			blk, err = s.cfg.executionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
 			if err != nil {
-				log.WithError(err).Error("Could not get reconstruct full bellatrix block from blinded body")
+				if errors.Is(err, execution.EmptyBlockHash) {
+					log.WithError(err).Warn("Could not reconstruct block from header with syncing execution client. Waiting to complete syncing")
+				} else {
+					log.WithError(err).Error("Could not get reconstruct full block from blinded body")
+				}
 				s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
 				return err
 			}
@@ -4,15 +4,23 @@ import (
 	"context"

 	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
+	opfeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/operation"
 	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
 	"google.golang.org/protobuf/proto"
 )

-func (s *Service) blsToExecutionChangeSubscriber(ctx context.Context, msg proto.Message) error {
+func (s *Service) blsToExecutionChangeSubscriber(_ context.Context, msg proto.Message) error {
 	blsMsg, ok := msg.(*ethpb.SignedBLSToExecutionChange)
 	if !ok {
 		return errors.Errorf("incorrect type of message received, wanted %T but got %T", &ethpb.SignedBLSToExecutionChange{}, msg)
 	}
+	s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
+		Type: opfeed.BLSToExecutionChangeReceived,
+		Data: &opfeed.BLSToExecutionChangeReceivedData{
+			Change: blsMsg,
+		},
+	})
 	s.cfg.blsToExecPool.InsertBLSToExecChange(blsMsg)
 	return nil
 }
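The subscriber now publishes each gossiped change on the operation feed before pooling it, so other services can react to it. A minimal sketch of a feed consumer, assuming Prysm's go-ethereum-style event-feed API (`Subscribe`, `Unsubscribe`, `Err`) and the `opfeed` types shown above; `watchBLSChanges` is a hypothetical helper, not part of the diff:

```go
package sync

import (
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
	opfeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/operation"
	"github.com/sirupsen/logrus"
)

// watchBLSChanges logs every BLS-to-execution change seen on the feed.
func watchBLSChanges(notifier opfeed.Notifier) {
	ch := make(chan *feed.Event, 16)
	sub := notifier.OperationFeed().Subscribe(ch)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-ch:
			if ev.Type != opfeed.BLSToExecutionChangeReceived {
				continue
			}
			if data, ok := ev.Data.(*opfeed.BLSToExecutionChangeReceivedData); ok {
				logrus.Infof("Validator %d rotated withdrawal credentials", data.Change.Message.ValidatorIndex)
			}
		case err := <-sub.Err():
			logrus.WithError(err).Error("Operation feed subscription failed")
			return
		}
	}
}
```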
@@ -60,7 +60,11 @@ func TestSubTopicHandler_CRUD(t *testing.T) {
 	h.addTopic(topic, new(pubsub.Subscription))
 	assert.Equal(t, true, h.topicExists(topic))

 	assert.Equal(t, 6, len(h.allTopics()))
+	topic = fmt.Sprintf(p2p.BlsToExecutionChangeSubnetTopicFormat, digest) + enc.ProtocolSuffix()
+	h.addTopic(topic, new(pubsub.Subscription))
+	assert.Equal(t, true, h.topicExists(topic))
+
+	assert.Equal(t, 7, len(h.allTopics()))

 	// Remove multiple topics
 	topic = fmt.Sprintf(p2p.AttesterSlashingSubnetTopicFormat, digest) + enc.ProtocolSuffix()

@@ -76,7 +80,7 @@ func TestSubTopicHandler_CRUD(t *testing.T) {
 	assert.Equal(t, false, h.topicExists(topic))

 	assert.Equal(t, true, h.digestExists(digest))
-	assert.Equal(t, 3, len(h.allTopics()))
+	assert.Equal(t, 4, len(h.allTopics()))

 	// Remove remaining topics.
 	topic = fmt.Sprintf(p2p.BlockSubnetTopicFormat, digest) + enc.ProtocolSuffix()

@@ -91,6 +95,10 @@ func TestSubTopicHandler_CRUD(t *testing.T) {
 	h.removeTopic(topic)
 	assert.Equal(t, false, h.topicExists(topic))

+	topic = fmt.Sprintf(p2p.BlsToExecutionChangeSubnetTopicFormat, digest) + enc.ProtocolSuffix()
+	h.removeTopic(topic)
+	assert.Equal(t, false, h.topicExists(topic))
+
 	assert.Equal(t, false, h.digestExists(digest))
 	assert.Equal(t, 0, len(h.allTopics()))
 }
@@ -66,10 +66,7 @@ container_image(
 container_bundle(
     name = "image_bundle",
     images = {
-        "gcr.io/prysmaticlabs/prysm/beacon-chain:latest": ":image_with_creation_time",
-        "gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
-        "index.docker.io/prysmaticlabs/prysm-beacon-chain:latest": ":image_with_creation_time",
-        "index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
+        "gcr.io/prylabs-dev/beacon-chain:evil-shapella": ":image_with_creation_time",
     },
     tags = ["manual"],
     visibility = ["//beacon-chain:__pkg__"],
@@ -5,6 +5,7 @@ package flags
 import (
 	"strings"

+	"github.com/prysmaticlabs/prysm/v3/cmd"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	"github.com/urfave/cli/v2"
 )

@@ -246,4 +247,10 @@ var (
 			"WARNING: This flag should be used only if you have a clear understanding that community has decided to override the terminal block hash activation epoch. " +
 			"Incorrect usage will result in your node experience consensus failure.",
 	}
+	// SlasherDirFlag defines a path on disk where the slasher database is stored.
+	SlasherDirFlag = &cli.StringFlag{
+		Name:  "slasher-datadir",
+		Usage: "Directory for the slasher database",
+		Value: cmd.DefaultDataDir(),
+	}
 )
@@ -130,6 +130,7 @@ var appFlags = []cli.Flag{
 	checkpoint.RemoteURL,
 	genesis.StatePath,
 	genesis.BeaconAPIURL,
+	flags.SlasherDirFlag,
 }

 func init() {

@@ -126,6 +126,7 @@ var appHelpFlagGroups = []flagGroup{
 	flags.MaxBuilderEpochMissedSlots,
 	flags.MaxBuilderConsecutiveMissedSlots,
 	flags.EngineEndpointTimeoutSeconds,
+	flags.SlasherDirFlag,
 	checkpoint.BlockPath,
 	checkpoint.StatePath,
 	checkpoint.RemoteURL,
@@ -35,7 +35,7 @@ your flag since you're going to invert the flag in a later step. i.e you will us
 later. For example, `--enable-my-feature`. Additionally, [create a feature flag tracking issue](https://github.com/prysmaticlabs/prysm/issues/new?template=feature_flag.md)
 for your feature using the appropriate issue template.
 2. Use the feature throughout the application to enable your new functionality and be sure to write
-tests carefully and thoughtfully to ensure you have tested all of your new funcitonality without losing
+tests carefully and thoughtfully to ensure you have tested all of your new functionality without losing
 coverage on the existing functionality. This is considered an opt-in feature flag. Example usage:
 ```go
 func someExistingMethod(ctx context.Context) error {

@@ -58,4 +58,4 @@ the config value in shared/featureconfig/config.go.
 deprecate the opt-out feature flag, delete the config field from shared/featureconfig/config.go,
 delete any deprecated / obsolete code paths.

-Deprecated flags are deleted upon each major semver point release. Ex: v1, v2, v3.
\ No newline at end of file
+Deprecated flags are deleted upon each major semver point release. Ex: v1, v2, v3.
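The usage example in the first hunk above is cut off by the diff. A plausible continuation in the shape this document describes, assuming a `featureconfig.Get()`-style accessor (the document references shared/featureconfig/config.go) and a hypothetical `EnableMyFeature` field:

```go
func someExistingMethod(ctx context.Context) error {
	if featureconfig.Get().EnableMyFeature {
		// New, opt-in behavior guarded by the flag (hypothetical code path).
		return newCodePath(ctx)
	}
	// Default behavior stays untouched until the flag is later inverted.
	return existingCodePath(ctx)
}
```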
@@ -174,6 +174,7 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c
+	disableBroadcastSlashingFlag,
 	enableSlasherFlag,
 	enableHistoricalSpaceRepresentation,
 	disableStakinContractCheck,
 	disablePullTips,
 	disableVecHTR,
 	disableForkChoiceDoublyLinkedTree,
@@ -149,8 +149,6 @@ type BeaconChainConfig struct {
 	AltairForkEpoch      types.Epoch `yaml:"ALTAIR_FORK_EPOCH" spec:"true"`      // AltairForkEpoch is used to represent the assigned fork epoch for altair.
 	BellatrixForkVersion []byte      `yaml:"BELLATRIX_FORK_VERSION" spec:"true"` // BellatrixForkVersion is used to represent the fork version for bellatrix.
 	BellatrixForkEpoch   types.Epoch `yaml:"BELLATRIX_FORK_EPOCH" spec:"true"`   // BellatrixForkEpoch is used to represent the assigned fork epoch for bellatrix.
-	ShardingForkVersion  []byte      `yaml:"SHARDING_FORK_VERSION" spec:"true"`  // ShardingForkVersion is used to represent the fork version for sharding.
-	ShardingForkEpoch    types.Epoch `yaml:"SHARDING_FORK_EPOCH" spec:"true"`    // ShardingForkEpoch is used to represent the assigned fork epoch for sharding.
 	CapellaForkVersion   []byte      `yaml:"CAPELLA_FORK_VERSION" spec:"true"`   // CapellaForkVersion is used to represent the fork version for capella.
 	CapellaForkEpoch     types.Epoch `yaml:"CAPELLA_FORK_EPOCH" spec:"true"`     // CapellaForkEpoch is used to represent the assigned fork epoch for capella.
@@ -221,22 +219,18 @@ func (b *BeaconChainConfig) InitializeForkSchedule() {

 func configForkSchedule(b *BeaconChainConfig) map[[fieldparams.VersionLength]byte]types.Epoch {
 	fvs := map[[fieldparams.VersionLength]byte]types.Epoch{}
 	// Set Genesis fork data.
 	fvs[bytesutil.ToBytes4(b.GenesisForkVersion)] = b.GenesisEpoch
 	// Set Altair fork data.
 	fvs[bytesutil.ToBytes4(b.AltairForkVersion)] = b.AltairForkEpoch
 	// Set Bellatrix fork data.
 	fvs[bytesutil.ToBytes4(b.BellatrixForkVersion)] = b.BellatrixForkEpoch
 	fvs[bytesutil.ToBytes4(b.CapellaForkVersion)] = b.CapellaForkEpoch
 	return fvs
 }

 func configForkNames(b *BeaconChainConfig) map[[fieldparams.VersionLength]byte]string {
 	fvn := map[[fieldparams.VersionLength]byte]string{}
 	// Set Genesis fork data.
 	fvn[bytesutil.ToBytes4(b.GenesisForkVersion)] = "phase0"
 	// Set Altair fork data.
 	fvn[bytesutil.ToBytes4(b.AltairForkVersion)] = "altair"
 	// Set Bellatrix fork data.
 	fvn[bytesutil.ToBytes4(b.BellatrixForkVersion)] = "bellatrix"
 	fvn[bytesutil.ToBytes4(b.CapellaForkVersion)] = "capella"
 	return fvn
 }
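After `InitializeForkSchedule` populates these maps, callers can resolve a fork's epoch or name from its 4-byte version. A minimal usage sketch: `ForkVersionSchedule` is attested by the test hunk below, while the `ForkVersionNames` field name is an assumption:

```go
cfg := params.BeaconConfig()
cfg.InitializeForkSchedule()

key := bytesutil.ToBytes4(cfg.CapellaForkVersion)
epoch := cfg.ForkVersionSchedule[key] // == cfg.CapellaForkEpoch
name := cfg.ForkVersionNames[key]     // "capella" (field name assumed)
_, _ = epoch, name
```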
@@ -170,8 +170,6 @@ func compareConfigs(t *testing.T, expected, actual *BeaconChainConfig) {
 	require.DeepEqual(t, expected.AltairForkEpoch, actual.AltairForkEpoch)
 	require.DeepEqual(t, expected.BellatrixForkVersion, actual.BellatrixForkVersion)
 	require.DeepEqual(t, expected.BellatrixForkEpoch, actual.BellatrixForkEpoch)
-	require.DeepEqual(t, expected.ShardingForkVersion, actual.ShardingForkVersion)
-	require.DeepEqual(t, expected.ShardingForkEpoch, actual.ShardingForkEpoch)
 	require.DeepEqual(t, expected.ForkVersionSchedule, actual.ForkVersionSchedule)
 	require.DeepEqual(t, expected.SafetyDecay, actual.SafetyDecay)
 	require.DeepEqual(t, expected.TimelySourceFlagIndex, actual.TimelySourceFlagIndex)
@@ -9,7 +9,7 @@ func InteropConfig() *BeaconChainConfig {
 	c.GenesisForkVersion = []byte{0, 0, 0, 235}
 	c.AltairForkVersion = []byte{1, 0, 0, 235}
 	c.BellatrixForkVersion = []byte{2, 0, 0, 235}
-	c.ShardingForkVersion = []byte{3, 0, 0, 235}
+	c.CapellaForkVersion = []byte{3, 0, 0, 235}

 	c.InitializeForkSchedule()
 	return c
@@ -196,10 +196,9 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
 		fmt.Sprintf("DEPOSIT_NETWORK_ID: %d", cfg.DepositNetworkID),
 		fmt.Sprintf("ALTAIR_FORK_EPOCH: %d", cfg.AltairForkEpoch),
 		fmt.Sprintf("ALTAIR_FORK_VERSION: %#x", cfg.AltairForkVersion),
+		fmt.Sprintf("CAPELLA_FORK_VERSION: %#x", cfg.CapellaForkVersion),
 		fmt.Sprintf("BELLATRIX_FORK_EPOCH: %d", cfg.BellatrixForkEpoch),
 		fmt.Sprintf("BELLATRIX_FORK_VERSION: %#x", cfg.BellatrixForkVersion),
-		fmt.Sprintf("SHARDING_FORK_EPOCH: %d", cfg.ShardingForkEpoch),
-		fmt.Sprintf("SHARDING_FORK_VERSION: %#x", cfg.ShardingForkVersion),
 		fmt.Sprintf("INACTIVITY_SCORE_BIAS: %d", cfg.InactivityScoreBias),
 		fmt.Sprintf("INACTIVITY_SCORE_RECOVERY_RATE: %d", cfg.InactivityScoreRecoveryRate),
 		fmt.Sprintf("TERMINAL_TOTAL_DIFFICULTY: %s", cfg.TerminalTotalDifficulty),
@@ -107,13 +107,15 @@ func assertEqualConfigs(t *testing.T, name string, fields []string, expected, ac
 	assert.Equal(t, expected.DomainVoluntaryExit, actual.DomainVoluntaryExit, "%s: DomainVoluntaryExit", name)
 	assert.Equal(t, expected.DomainSelectionProof, actual.DomainSelectionProof, "%s: DomainSelectionProof", name)
 	assert.Equal(t, expected.DomainAggregateAndProof, actual.DomainAggregateAndProof, "%s: DomainAggregateAndProof", name)
-	assert.Equal(t, expected.TerminalTotalDifficulty, actual.TerminalTotalDifficulty, "%s: DomainAggregateAndProof", name)
-	assert.Equal(t, expected.AltairForkEpoch, actual.AltairForkEpoch, "%s: DomainAggregateAndProof", name)
-	assert.Equal(t, expected.BellatrixForkEpoch, actual.BellatrixForkEpoch, "%s: DomainAggregateAndProof", name)
-	assert.Equal(t, expected.SqrRootSlotsPerEpoch, actual.SqrRootSlotsPerEpoch, "%s: DomainAggregateAndProof", name)
-	assert.DeepEqual(t, expected.GenesisForkVersion, actual.GenesisForkVersion, "%s: DomainAggregateAndProof", name)
-	assert.DeepEqual(t, expected.AltairForkVersion, actual.AltairForkVersion, "%s: DomainAggregateAndProof", name)
-	assert.DeepEqual(t, expected.BellatrixForkVersion, actual.BellatrixForkVersion, "%s: DomainAggregateAndProof", name)
+	assert.Equal(t, expected.TerminalTotalDifficulty, actual.TerminalTotalDifficulty, "%s: TerminalTotalDifficulty", name)
+	assert.Equal(t, expected.AltairForkEpoch, actual.AltairForkEpoch, "%s: AltairForkEpoch", name)
+	assert.Equal(t, expected.BellatrixForkEpoch, actual.BellatrixForkEpoch, "%s: BellatrixForkEpoch", name)
+	assert.Equal(t, expected.CapellaForkEpoch, actual.CapellaForkEpoch, "%s: CapellaForkEpoch", name)
+	assert.Equal(t, expected.SqrRootSlotsPerEpoch, actual.SqrRootSlotsPerEpoch, "%s: SqrRootSlotsPerEpoch", name)
+	assert.DeepEqual(t, expected.GenesisForkVersion, actual.GenesisForkVersion, "%s: GenesisForkVersion", name)
+	assert.DeepEqual(t, expected.AltairForkVersion, actual.AltairForkVersion, "%s: AltairForkVersion", name)
+	assert.DeepEqual(t, expected.BellatrixForkVersion, actual.BellatrixForkVersion, "%s: BellatrixForkVersion", name)
+	assert.DeepEqual(t, expected.CapellaForkVersion, actual.CapellaForkVersion, "%s: CapellaForkVersion", name)

 	assertYamlFieldsMatch(t, name, fields, expected, actual)
 }
@@ -324,6 +326,9 @@ func fieldsFromYamls(t *testing.T, fps []string) []string {
 		require.NoError(t, yaml.Unmarshal(yamlFile, &m))

 		for k := range m {
+			if k == "SHARDING_FORK_VERSION" || k == "SHARDING_FORK_EPOCH" {
+				continue
+			}
 			keys = append(keys, k)
 		}
Some files were not shown because too many files have changed in this diff.