Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 05:47:59 -05:00
Compare commits
61 Commits
capella-de...e2e-at-cap
| Author | SHA1 | Date |
|---|---|---|
| | 317c09c40a | |
| | 2c2f56cc7e | |
| | 58acdb5136 | |
| | fa8a6d9d17 | |
| | fa2b64f702 | |
| | 6f5e35f08a | |
| | 743037efb5 | |
| | 301970cf5a | |
| | 01ae2fe7be | |
| | 1cfe5988e6 | |
| | a40cadab32 | |
| | 79d6ce45ad | |
| | 73cd7df679 | |
| | d084d5a979 | |
| | db6b1c15c4 | |
| | 7c9bff489e | |
| | 1fca73d761 | |
| | fbafbdd62c | |
| | 75d98cf9af | |
| | 9f5f807303 | |
| | 96401e734e | |
| | 5f6147ecf7 | |
| | 5480d607ac | |
| | 38f0a81526 | |
| | b97d2827e9 | |
| | ad680d3417 | |
| | 047cae5e8b | |
| | 179252faea | |
| | f93b82ee89 | |
| | 508e1ad005 | |
| | 8f06f72eed | |
| | 046882401e | |
| | ce339bc22b | |
| | 1b2d917389 | |
| | eb6b811071 | |
| | cf71dbdf97 | |
| | 1ec5d45d8d | |
| | 9328e9af1f | |
| | 4762fd71de | |
| | daa4fd2b72 | |
| | 5bdffb82e3 | |
| | aabcaac619 | |
| | 769daffb73 | |
| | 16e6e0de6c | |
| | 396fc3dc7e | |
| | d86a452b15 | |
| | 7fa3ebfaa8 | |
| | 81b9eceb50 | |
| | 505ff6ea3d | |
| | 2b5125c7bc | |
| | 9f3bb623ec | |
| | b10a95097e | |
| | 1e3a55c6a6 | |
| | 116f3ac265 | |
| | bbe003720c | |
| | e957edcb12 | |
| | 684022fa69 | |
| | a7a64632b1 | |
| | c89ab764e3 | |
| | 375a76d6c9 | |
| | bad7c6a94e | |
@@ -1,4 +1,4 @@
# Dependency Managagement in Prysm
# Dependency Management in Prysm

Prysm is go project with many complicated dependencies, including some c++ based libraries. There
are two parts to Prysm's dependency management. Go modules and bazel managed dependencies. Be sure
@@ -28,7 +28,7 @@ including complicated c++ dependencies.
One key advantage of Bazel over vanilla `go build` is that Bazel automatically (re)builds generated
pb.go files at build time when file changes are present in any protobuf definition file or after
any updates to the protobuf compiler or other relevant dependencies. Vanilla go users should run
the following scripts often to ensure their generated files are up to date. Further more, Prysm
the following scripts often to ensure their generated files are up to date. Furthermore, Prysm
generates SSZ marshal related code based on defined data structures. These generated files must
also be updated and checked in as frequently.
10 WORKSPACE
@@ -188,7 +188,7 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)

consensus_spec_version = "v1.3.0-rc.0"
consensus_spec_version = "v1.3.0-rc.1"

bls_test_version = "v0.1.1"

@@ -204,7 +204,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
#sha256 = "a92c41058dc17ced811cc85570cd6f8af761aedfcbd2dd7dd4fb64ac961d76f9",
sha256 = "3d6fadb64674eb64a84fae6c2efa9949231ea91e7cb74ada9214097323e86569",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -220,7 +220,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
#sha256 = "49a7944da92429ac8f41347f19837762247cdbf00e628c285d1b826e58e4096d",
sha256 = "54ffbcab1e77316a280e6f5a64c6ed62351e8f5678e6fa49340e49b9b5575e8e",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -236,7 +236,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
#sha256 = "d19673e9cd55e0c8d45eefc33b60978e14c166d0e891976fcaa114085312adcb",
sha256 = "bb06d30ca533dc97d45f2367916ba9ff1b5af52f08a9d8c33bb7b1a61254094e",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -251,7 +251,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
#sha256 = "a72b7457c403f6b76567d4d7bec19d01bedf7d5ef1d6f2c3a9e09ee86d401a14",
sha256 = "9d22246c00ec3907ef8dc3ddccdfe6f7153ce46df73deee0a0176fe7e4aa1126",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -67,6 +67,10 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
epoch := time.CurrentEpoch(state)

numValidators := state.NumValidators()
hrs, err := state.HistoricalRoots()
if err != nil {
return nil, err
}
s := &ethpb.BeaconStateAltair{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
@@ -79,7 +83,7 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: state.HistoricalRoots(),
HistoricalRoots: hrs,
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),

@@ -82,7 +82,11 @@ func TestUpgradeToAltair(t *testing.T) {
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), aState.LatestBlockHeader())
require.DeepSSZEqual(t, preForkState.BlockRoots(), aState.BlockRoots())
require.DeepSSZEqual(t, preForkState.StateRoots(), aState.StateRoots())
require.DeepSSZEqual(t, preForkState.HistoricalRoots(), aState.HistoricalRoots())
r1, err := preForkState.HistoricalRoots()
require.NoError(t, err)
r2, err := aState.HistoricalRoots()
require.NoError(t, err)
require.DeepSSZEqual(t, r1, r2)
require.DeepSSZEqual(t, preForkState.Eth1Data(), aState.Eth1Data())
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), aState.Eth1DataVotes())
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), aState.Eth1DepositIndex())
@@ -1,4 +1,3 @@
package blocks

var ProcessBLSToExecutionChange = processBLSToExecutionChange
var BLSChangesSigningDomain = blsChangesSigningDomain

@@ -37,7 +37,7 @@ func NewGenesisBlock(stateRoot []byte) *ethpb.SignedBeaconBlock {
return block
}

var ErrUnrecognizedState = errors.New("uknonwn underlying type for state.BeaconState value")
var ErrUnrecognizedState = errors.New("unknown underlying type for state.BeaconState value")

func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfaces.SignedBeaconBlock, error) {
root, err := st.HashTreeRoot(ctx)
@@ -113,6 +113,38 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
},
Signature: params.BeaconConfig().EmptySignature[:],
})
case *ethpb.BeaconStateCapella:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockCapella{
Block: &ethpb.BeaconBlockCapella{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyCapella{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadCapella{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
},
},
Signature: params.BeaconConfig().EmptySignature[:],
})
default:
return nil, ErrUnrecognizedState
}
@@ -17,7 +17,6 @@ import (
ethpbv2 "github.com/prysmaticlabs/prysm/v3/proto/eth/v2"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)

const executionToBLSPadding = 12

@@ -162,25 +161,6 @@ func ProcessWithdrawals(st state.BeaconState, withdrawals []*enginev1.Withdrawal
return st, nil
}

// blsChangesSigningDomain returns the signing domain to check BLSToExecutionChange messages against.
func blsChangesSigningDomain(st state.ReadOnlyBeaconState) ([]byte, error) {
var epoch types.Epoch
var fork *ethpb.Fork
if st.Version() < version.Capella {
epoch = params.BeaconConfig().CapellaForkEpoch
fork = &ethpb.Fork{
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
Epoch: epoch,
}

} else {
epoch = slots.ToEpoch(st.Slot())
fork = st.Fork()
}
return signing.Domain(fork, epoch, params.BeaconConfig().DomainBLSToExecutionChange, st.GenesisValidatorsRoot())
}

func BLSChangesSignatureBatch(
st state.ReadOnlyBeaconState,
changes []*ethpb.SignedBLSToExecutionChange,
@@ -195,9 +175,10 @@ func BLSChangesSignatureBatch(
Messages: make([][32]byte, len(changes)),
Descriptions: make([]string, len(changes)),
}
domain, err := blsChangesSigningDomain(st)
c := params.BeaconConfig()
domain, err := signing.ComputeDomain(c.DomainBLSToExecutionChange, c.GenesisForkVersion, st.GenesisValidatorsRoot())
if err != nil {
return nil, err
return nil, errors.Wrap(err, "could not compute signing domain")
}
for i, change := range changes {
batch.Signatures[i] = change.Signature

@@ -223,7 +204,8 @@ func VerifyBLSChangeSignature(
st state.BeaconState,
change *ethpbv2.SignedBLSToExecutionChange,
) error {
domain, err := blsChangesSigningDomain(st)
c := params.BeaconConfig()
domain, err := signing.ComputeDomain(c.DomainBLSToExecutionChange, c.GenesisForkVersion, st.GenesisValidatorsRoot())
if err != nil {
return errors.Wrap(err, "could not compute signing domain")
}
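Both call sites above now derive the BLSToExecutionChange signing domain from the genesis fork version instead of the state's current fork, so a change signed once stays verifiable across later forks. A minimal sketch of that domain computation, reusing only names that appear in this diff (an illustration, not necessarily the exact helper the codebase ends up with):

```go
// Sketch only: mirrors the pattern introduced above. Assumes st is a
// state.ReadOnlyBeaconState and that signing.ComputeDomain takes the
// (domainType, forkVersion, genesisValidatorsRoot) arguments shown in the diff.
func blsChangeDomain(st state.ReadOnlyBeaconState) ([]byte, error) {
	c := params.BeaconConfig()
	// Pinning the domain to GenesisForkVersion (rather than st.Fork()) keeps
	// signatures valid regardless of which fork the chain is currently on.
	return signing.ComputeDomain(c.DomainBLSToExecutionChange, c.GenesisForkVersion, st.GenesisValidatorsRoot())
}
```

This is also why the new TestBLSChangesSignatureBatchWrongFork below expects verification to fail: its fixtures sign over the state's Capella fork domain, which no longer matches the genesis-based domain used for verification.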
@@ -791,6 +791,71 @@ func TestBLSChangesSignatureBatch(t *testing.T) {
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
}

func TestBLSChangesSignatureBatchWrongFork(t *testing.T) {
spb := &ethpb.BeaconStateCapella{
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
Epoch: params.BeaconConfig().CapellaForkEpoch,
},
}
numValidators := 10
validators := make([]*ethpb.Validator, numValidators)
blsChanges := make([]*ethpb.BLSToExecutionChange, numValidators)
spb.Balances = make([]uint64, numValidators)
privKeys := make([]common.SecretKey, numValidators)
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}

for i := range validators {
v := &ethpb.Validator{}
v.EffectiveBalance = maxEffectiveBalance
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
v.WithdrawalCredentials = make([]byte, 32)
priv, err := bls.RandKey()
require.NoError(t, err)
privKeys[i] = priv
pubkey := priv.PublicKey().Marshal()

message := &ethpb.BLSToExecutionChange{
ToExecutionAddress: executionAddress,
ValidatorIndex: types.ValidatorIndex(i),
FromBlsPubkey: pubkey,
}

hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
copy(v.WithdrawalCredentials, digest[:])
validators[i] = v
blsChanges[i] = message
}
spb.Validators = validators
st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)

signedChanges := make([]*ethpb.SignedBLSToExecutionChange, numValidators)
for i, message := range blsChanges {
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
require.NoError(t, err)

signed := &ethpb.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}
signedChanges[i] = signed
}
batch, err := blocks.BLSChangesSignatureBatch(st, signedChanges)
require.NoError(t, err)
verify, err := batch.Verify()
require.NoError(t, err)
require.Equal(t, false, verify)

// Verify a single change
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
require.ErrorIs(t, signing.ErrSigFailedToVerify, blocks.VerifyBLSChangeSignature(st, change))
}

func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
cfg := params.BeaconConfig()
savedConfig := cfg.Copy()
@@ -847,7 +912,7 @@ func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
spc := &ethpb.BeaconStateCapella{
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
Epoch: params.BeaconConfig().CapellaForkEpoch,
},
}
@@ -859,7 +924,7 @@ func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
require.NoError(t, err)

for i, message := range blsChanges {
signature, err := signing.ComputeDomainAndSign(stc, time.CurrentEpoch(stc), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
signature, err := signing.ComputeDomainAndSign(stc, 0, message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
require.NoError(t, err)

signed := &ethpb.SignedBLSToExecutionChange{
@@ -879,51 +944,3 @@ func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
params.OverrideBeaconConfig(savedConfig)
}

func TestBLSChangesSigningDomain(t *testing.T) {
cfg := params.BeaconConfig()
savedConfig := cfg.Copy()
cfg.CapellaForkEpoch = cfg.BellatrixForkEpoch.AddEpoch(2)
params.OverrideBeaconConfig(cfg)
capellaDomain := []byte{0xa, 0x0, 0x0, 0x0, 0xe7, 0xb4, 0xbb, 0x67, 0x55, 0x1d, 0xde, 0x95, 0x89, 0xc1, 0x55, 0x3d, 0xfd, 0xa3, 0x7a, 0x94, 0x2a, 0x18, 0xca, 0xf1, 0x84, 0xf9, 0xcc, 0x16, 0x29, 0xd2, 0x5c, 0xf5}

t.Run("pre-Capella fork", func(t *testing.T) {
spb := &ethpb.BeaconStateBellatrix{
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().BellatrixForkVersion,
PreviousVersion: params.BeaconConfig().AltairForkVersion,
Epoch: params.BeaconConfig().BellatrixForkEpoch,
},
}
slot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
require.NoError(t, err)
spb.Slot = slot

st, err := state_native.InitializeFromProtoBellatrix(spb)
require.NoError(t, err)

domain, err := blocks.BLSChangesSigningDomain(st)
require.NoError(t, err)
require.DeepEqual(t, capellaDomain, domain)
})
t.Run("post-Capella fork", func(t *testing.T) {
spb := &ethpb.BeaconStateCapella{
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
Epoch: params.BeaconConfig().CapellaForkEpoch,
},
}
slot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)
spb.Slot = slot

st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)

domain, err := blocks.BLSChangesSigningDomain(st)
require.NoError(t, err)
require.DeepEqual(t, capellaDomain, domain)
})
params.OverrideBeaconConfig(savedConfig)
}
@@ -42,6 +42,10 @@ func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
return nil, err
}

hrs, err := state.HistoricalRoots()
if err != nil {
return nil, err
}
s := &ethpb.BeaconStateCapella{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
@@ -54,7 +58,7 @@ func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: state.HistoricalRoots(),
HistoricalRoots: hrs,
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
@@ -90,6 +94,7 @@ func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
},
NextWithdrawalIndex: 0,
NextWithdrawalValidatorIndex: 0,
HistoricalSummaries: make([]*ethpb.HistoricalSummary, 0),
}

return state_native.InitializeFromProtoUnsafeCapella(s)

@@ -25,7 +25,6 @@ func TestUpgradeToCapella(t *testing.T) {
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
require.DeepSSZEqual(t, preForkState.HistoricalRoots(), mSt.HistoricalRoots())
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
@@ -98,4 +97,8 @@ func TestUpgradeToCapella(t *testing.T) {
lwvi, err := mSt.NextWithdrawalValidatorIndex()
require.NoError(t, err)
require.Equal(t, types.ValidatorIndex(0), lwvi)

summaries, err := mSt.HistoricalSummaries()
require.NoError(t, err)
require.Equal(t, 0, len(summaries))
}
@@ -353,7 +353,7 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
}

// ProcessHistoricalDataUpdate processes the updates to historical data during epoch processing.
// For Capella state, per spec, historical summaries is updated instead of historical roots.
// From Capella onward, per spec, state's historical summaries are updated instead of historical roots.
func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, error) {
currentEpoch := time.CurrentEpoch(state)
nextEpoch := currentEpoch + 1
@@ -370,7 +370,7 @@ func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, er
if err != nil {
return nil, err
}
if err := state.AppendHistoricalSummariesUpdate(&ethpb.HistoricalSummary{BlockSummaryRoot: br[:], StateSummaryRoot: sr[:]}); err != nil {
if err := state.AppendHistoricalSummaries(&ethpb.HistoricalSummary{BlockSummaryRoot: br[:], StateSummaryRoot: sr[:]}); err != nil {
return nil, err
}
} else {
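The renamed AppendHistoricalSummaries call sits on the Capella branch of this function; the hunk cuts off before the else branch. A rough sketch of the overall shape, where the fork-version gate and the pre-Capella path are assumptions (they are not shown in this diff) and only the Capella path is taken from the hunk:

```go
// Sketch of the branching described in the comment above. The version gate and
// the pre-Capella path are assumptions; only the Capella branch appears in the hunk.
if state.Version() >= version.Capella { // assumed gate, not shown in the hunk
	if err := state.AppendHistoricalSummaries(&ethpb.HistoricalSummary{
		BlockSummaryRoot: br[:],
		StateSummaryRoot: sr[:],
	}); err != nil {
		return nil, err
	}
} else {
	// Pre-Capella forks keep appending HistoricalBatch roots here; that path is
	// not shown in this hunk but is exercised by the phase0/Altair cases in the
	// TestProcessHistoricalDataUpdate table below.
}
```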
@@ -276,7 +276,9 @@ func TestProcessFinalUpdates_CanProcess(t *testing.T) {
assert.DeepNotEqual(t, params.BeaconConfig().ZeroHash[:], mix, "latest RANDAO still zero hashes")

// Verify historical root accumulator was appended.
assert.Equal(t, 1, len(newS.HistoricalRoots()), "Unexpected slashed balance")
roots, err := newS.HistoricalRoots()
require.NoError(t, err)
assert.Equal(t, 1, len(roots), "Unexpected slashed balance")
currAtt, err := newS.CurrentEpochAttestations()
require.NoError(t, err)
assert.NotNil(t, currAtt, "Nil value stored in current epoch attestations instead of empty slice")
@@ -458,7 +460,7 @@ func TestProcessSlashings_BadValue(t *testing.T) {
require.ErrorContains(t, "addition overflows", err)
}

func TestProcessHistoricalRootsUpdate(t *testing.T) {
func TestProcessHistoricalDataUpdate(t *testing.T) {
tests := []struct {
name string
st func() state.BeaconState
@@ -471,7 +473,9 @@ func TestProcessHistoricalRootsUpdate(t *testing.T) {
return st
},
verifier: func(st state.BeaconState) {
require.Equal(t, 0, len(st.HistoricalRoots()))
roots, err := st.HistoricalRoots()
require.NoError(t, err)
require.Equal(t, 0, len(roots))
},
},
{
@@ -483,7 +487,9 @@ func TestProcessHistoricalRootsUpdate(t *testing.T) {
return st
},
verifier: func(st state.BeaconState) {
require.Equal(t, 1, len(st.HistoricalRoots()))
roots, err := st.HistoricalRoots()
require.NoError(t, err)
require.Equal(t, 1, len(roots))

b := &ethpb.HistoricalBatch{
BlockRoots: st.BlockRoots(),
@@ -491,7 +497,10 @@ func TestProcessHistoricalRootsUpdate(t *testing.T) {
}
r, err := b.HashTreeRoot()
require.NoError(t, err)
require.DeepEqual(t, r[:], st.HistoricalRoots()[0])
require.DeepEqual(t, r[:], roots[0])

_, err = st.HistoricalSummaries()
require.ErrorContains(t, "HistoricalSummaries is not supported for phase0", err)
},
},
{
@@ -503,7 +512,9 @@ func TestProcessHistoricalRootsUpdate(t *testing.T) {
return st
},
verifier: func(st state.BeaconState) {
require.Equal(t, 1, len(st.HistoricalSummaries()))
summaries, err := st.HistoricalSummaries()
require.NoError(t, err)
require.Equal(t, 1, len(summaries))

br, err := stateutil.ArraysRoot(st.BlockRoots(), fieldparams.BlockRootsLength)
require.NoError(t, err)
@@ -513,7 +524,10 @@ func TestProcessHistoricalRootsUpdate(t *testing.T) {
BlockSummaryRoot: br[:],
StateSummaryRoot: sr[:],
}
require.DeepEqual(t, b, st.HistoricalSummaries()[0])
require.DeepEqual(t, b, summaries[0])
hrs, err := st.HistoricalRoots()
require.NoError(t, err)
require.DeepEqual(t, hrs, [][]byte{})
},
},
}
@@ -35,6 +35,10 @@ func UpgradeToBellatrix(state state.BeaconState) (state.BeaconState, error) {
return nil, err
}

hrs, err := state.HistoricalRoots()
if err != nil {
return nil, err
}
s := &ethpb.BeaconStateBellatrix{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
@@ -47,7 +51,7 @@ func UpgradeToBellatrix(state state.BeaconState) (state.BeaconState, error) {
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: state.HistoricalRoots(),
HistoricalRoots: hrs,
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),

@@ -24,7 +24,11 @@ func TestUpgradeToBellatrix(t *testing.T) {
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
require.DeepSSZEqual(t, preForkState.HistoricalRoots(), mSt.HistoricalRoots())
r1, err := preForkState.HistoricalRoots()
require.NoError(t, err)
r2, err := mSt.HistoricalRoots()
require.NoError(t, err)
require.DeepSSZEqual(t, r1, r2)
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
@@ -19,6 +19,9 @@ const (

// SyncCommitteeContributionReceived is sent after a sync committee contribution object has been received.
SyncCommitteeContributionReceived

// BLSToExecutionChangeReceived is sent after a BLS to execution change object has been received from gossip or rpc.
BLSToExecutionChangeReceived
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -44,3 +47,8 @@ type SyncCommitteeContributionReceivedData struct {
// Contribution is the sync committee contribution object.
Contribution *ethpb.SignedContributionAndProof
}

// BLSToExecutionChangeReceivedData is the data sent with BLSToExecutionChangeReceived events.
type BLSToExecutionChangeReceivedData struct {
Change *ethpb.SignedBLSToExecutionChange
}
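The new event type and payload defined above are wired up further down in this compare, where the beacon RPC handler publishes the event on the operation feed after accepting a signed change. A short sketch of that publish side, copied from the SubmitSignedBLSToExecutionChanges hunk later in the diff (alphaChange is the accepted *ethpb.SignedBLSToExecutionChange):

```go
// Publish side, as it appears in the SubmitSignedBLSToExecutionChanges hunk below;
// feed subscribers receive an event carrying BLSToExecutionChangeReceivedData.
bs.OperationNotifier.OperationFeed().Send(&feed.Event{
	Type: operation.BLSToExecutionChangeReceived,
	Data: &operation.BLSToExecutionChangeReceivedData{
		Change: alphaChange,
	},
})
```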
@@ -2,12 +2,14 @@ package kv

import (
"context"
"encoding/hex"
"os"
"testing"

"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/iface"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
@@ -48,6 +50,37 @@ func testGenesisDataSaved(t *testing.T, db iface.Database) {
require.Equal(t, gbHTR, headHTR, "head block does not match genesis block")
}

func TestLoadCapellaFromFile(t *testing.T) {
cfg, err := params.ByName(params.MainnetName)
require.NoError(t, err)
// This state fixture is from a hive testnet, `0a` is the suffix they are using in their fork versions.
suffix, err := hex.DecodeString("0a")
require.NoError(t, err)
require.Equal(t, 1, len(suffix))
reversioned := cfg.Copy()
params.FillTestVersions(reversioned, suffix[0])
reversioned.CapellaForkEpoch = 0
require.Equal(t, [4]byte{3, 0, 0, 10}, bytesutil.ToBytes4(reversioned.CapellaForkVersion))
reversioned.ConfigName = "capella-genesis-test"
undo, err := params.SetActiveWithUndo(reversioned)
require.NoError(t, err)
defer func() {
require.NoError(t, undo())
}()

fp := "testdata/capella_genesis.ssz"
rfp, err := bazel.Runfile(fp)
if err == nil {
fp = rfp
}
sb, err := os.ReadFile(fp)
require.NoError(t, err)

db := setupDB(t)
require.NoError(t, db.LoadGenesis(context.Background(), sb))
testGenesisDataSaved(t, db)
}

func TestLoadGenesisFromFile(t *testing.T) {
// for this test to work, we need the active config to have these properties:
// - fork version schedule that matches mainnnet.genesis.ssz
@@ -57,7 +90,7 @@ func TestLoadGenesisFromFile(t *testing.T) {
// uses the mainnet fork schedule. construct the differently named mainnet config and set it active.
// finally, revert all this at the end of the test.

// first get the real mainnet out of the way by overwriting it schedule.
// first get the real mainnet out of the way by overwriting its schedule.
cfg, err := params.ByName(params.MainnetName)
require.NoError(t, err)
cfg = cfg.Copy()
BIN beacon-chain/db/kv/testdata/capella_genesis.ssz vendored Normal file
Binary file not shown.
@@ -86,6 +86,8 @@ type EngineCaller interface {
GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
}

var EmptyBlockHash = errors.New("Block hash is empty 0x0000...")

// NewPayload calls the engine_newPayloadVX method via JSON-RPC.
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
@@ -471,6 +473,10 @@ func (s *Service) ReconstructFullBlock(
if executionBlock == nil {
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionBlockHash)
}
if bytes.Equal(executionBlock.Hash.Bytes(), []byte{}) {
return nil, EmptyBlockHash
}

executionBlock.Version = blindedBlock.Version()
payload, err := fullPayloadFromExecutionBlock(header, executionBlock)
if err != nil {

@@ -54,6 +54,7 @@ func (RPCClientBad) CallContext(context.Context, interface{}, string, ...interfa
}

func TestClient_IPC(t *testing.T) {
t.Skip("Skipping IPC test to support Capella devnet-3")
server := newTestIPCServer(t)
defer server.Stop()
rpcClient := rpc.DialInProc(server)
@@ -147,13 +148,15 @@ func TestClient_IPC(t *testing.T) {
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
require.Equal(t, true, ok)
arg := common.BytesToHash([]byte("foo"))
resp, err := srv.ExecutionBlockByHash(ctx, version.Bellatrix, arg, true /* with txs */)
resp, err := srv.ExecutionBlockByHash(ctx, arg, true /* with txs */)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
}

func TestClient_HTTP(t *testing.T) {
t.Skip("Skipping HTTP test to support Capella devnet-3")

ctx := context.Background()
fix := fixtures()
@@ -618,7 +621,7 @@ func TestClient_HTTP(t *testing.T) {
service.rpcClient = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, err := service.ExecutionBlockByHash(ctx, version.Bellatrix, arg, true /* with txs */)
resp, err := service.ExecutionBlockByHash(ctx, arg, true /* with txs */)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})

@@ -26,6 +26,7 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//accounts/abi/bind/backends:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",

@@ -15,6 +15,7 @@ import (
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)

// EngineClient --
@@ -23,6 +24,7 @@ type EngineClient struct {
PayloadIDBytes *pb.PayloadIDBytes
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
ExecutionPayloadCapella *pb.ExecutionPayloadCapella
ExecutionBlock *pb.ExecutionBlock
Err error
ErrLatestExecBlock error
@@ -54,7 +56,10 @@ func (e *EngineClient) ForkchoiceUpdated(
}

// GetPayload --
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, _ types.Slot) (interfaces.ExecutionData, error) {
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s types.Slot) (interfaces.ExecutionData, error) {
if slots.ToEpoch(s) >= params.BeaconConfig().CapellaForkEpoch {
return blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella)
}
p, err := blocks.WrappedExecutionPayload(e.ExecutionPayload)
if err != nil {
return nil, err
@@ -52,8 +52,8 @@ func (c *AttCaches) SaveUnaggregatedAttestations(atts []*ethpb.Attestation) erro

// UnaggregatedAttestations returns all the unaggregated attestations in cache.
func (c *AttCaches) UnaggregatedAttestations() ([]*ethpb.Attestation, error) {
c.unAggregateAttLock.Lock()
defer c.unAggregateAttLock.Unlock()
c.unAggregateAttLock.RLock()
defer c.unAggregateAttLock.RUnlock()
unAggregatedAtts := c.unAggregatedAtt
atts := make([]*ethpb.Attestation, 0, len(unAggregatedAtts))
for _, att := range unAggregatedAtts {

@@ -59,8 +59,8 @@ func (s *Service) pruneExpiredAtts() {
if err := s.cfg.Pool.DeleteBlockAttestation(att); err != nil {
log.WithError(err).Error("Could not delete expired block attestation")
}
expiredBlockAtts.Inc()
}
expiredBlockAtts.Inc()
}
}
@@ -416,6 +416,8 @@ func receiveEvents(eventChan <-chan *sse.Event, w http.ResponseWriter, req *http
data = &EventChainReorgJson{}
case events.SyncCommitteeContributionTopic:
data = &SignedContributionAndProofJson{}
case events.BLSToExecutionChangeTopic:
data = &SignedBLSToExecutionChangeJson{}
case "error":
data = &EventErrorJson{}
default:

@@ -864,6 +864,7 @@ type BeaconStateCapellaJson struct {
LatestExecutionPayloadHeader *ExecutionPayloadHeaderCapellaJson `json:"latest_execution_payload_header"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummaryJson `json:"historical_summaries"`
}

type BeaconStateContainerV2Json struct {
@@ -1037,6 +1038,11 @@ type ForkChoiceDumpJson struct {
ForkChoiceNodes []*ForkChoiceNodeJson `json:"fork_choice_nodes"`
}

type HistoricalSummaryJson struct {
BlockSummaryRoot string `json:"block_summary_root" hex:"true"`
StateSummaryRoot string `json:"state_summary_root" hex:"true"`
}

//----------------
// SSZ
// ---------------

@@ -115,6 +115,7 @@ go_test(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/service:go_default_library",
"//proto/eth/v1:go_default_library",
@@ -1195,555 +1195,3 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_GetBlindedBlockSSZ(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
|
||||
b2 := util.NewBeaconBlock()
|
||||
b2.Block.Slot = 30
|
||||
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b2)
|
||||
|
||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
},
|
||||
}
|
||||
|
||||
blks, err := beaconDB.BlocksBySlot(ctx, 30)
|
||||
require.Equal(t, true, len(blks) > 0)
|
||||
require.NoError(t, err)
|
||||
sszBlock, err := blks[0].MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{BlockId: []byte("30")})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.DeepEqual(t, sszBlock, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
|
||||
b2 := util.NewBeaconBlockAltair()
|
||||
b2.Block.Slot = 30
|
||||
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b2)
|
||||
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetAltairBlock())
|
||||
require.NoError(t, err)
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
},
|
||||
}
|
||||
|
||||
blks, err := beaconDB.BlocksBySlot(ctx, 30)
|
||||
require.Equal(t, true, len(blks) > 0)
|
||||
require.NoError(t, err)
|
||||
sszBlock, err := blks[0].MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{BlockId: []byte("30")})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.DeepEqual(t, sszBlock, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, blkContainers := fillDBTestBlocksBellatrixBlinded(ctx, t, beaconDB)
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
|
||||
b2 := util.NewBlindedBeaconBlockBellatrix()
|
||||
b2.Block.Slot = 30
|
||||
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b2)
|
||||
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedBellatrixBlock())
|
||||
require.NoError(t, err)
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
},
|
||||
OptimisticModeFetcher: &mock.ChainService{},
|
||||
}
|
||||
|
||||
blks, err := beaconDB.BlocksBySlot(ctx, 30)
|
||||
require.Equal(t, true, len(blks) > 0)
|
||||
require.NoError(t, err)
|
||||
sszBlock, err := blks[0].MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{BlockId: []byte("30")})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.DeepEqual(t, sszBlock, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, blkContainers := fillDBTestBlocksCapellaBlinded(ctx, t, beaconDB)
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
|
||||
b2 := util.NewBlindedBeaconBlockCapella()
|
||||
b2.Block.Slot = 30
|
||||
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b2)
|
||||
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedCapellaBlock())
|
||||
require.NoError(t, err)
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
},
|
||||
OptimisticModeFetcher: &mock.ChainService{},
|
||||
}
|
||||
|
||||
blks, err := beaconDB.BlocksBySlot(ctx, 30)
|
||||
require.Equal(t, true, len(blks) > 0)
|
||||
require.NoError(t, err)
|
||||
sszBlock, err := blks[0].MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{BlockId: []byte("30")})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.DeepEqual(t, sszBlock, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_SubmitBlindedBlockSSZ_OK(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlock()
|
||||
util.SaveBlock(t, context.Background(), beaconDB, genesis)
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
HeadFetcher: c,
|
||||
}
|
||||
req := util.NewBeaconBlock()
|
||||
req.Block.Slot = 5
|
||||
req.Block.ParentRoot = bsRoot[:]
|
||||
util.SaveBlock(t, ctx, beaconDB, req)
|
||||
blockSsz, err := req.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: blockSsz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "phase0")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err, "Could not propose block correctly")
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlockAltair()
|
||||
util.SaveBlock(t, context.Background(), beaconDB, genesis)
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
HeadFetcher: c,
|
||||
}
|
||||
req := util.NewBeaconBlockAltair()
|
||||
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
|
||||
req.Block.ParentRoot = bsRoot[:]
|
||||
util.SaveBlock(t, ctx, beaconDB, req)
|
||||
blockSsz, err := req.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: blockSsz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "altair")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err, "Could not propose block correctly")
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
// INFO: This code block can be removed once Bellatrix
|
||||
// fork epoch is set to a value other than math.MaxUint64
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.BellatrixForkEpoch = cfg.AltairForkEpoch + 1000
|
||||
cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.BellatrixForkVersion)] = cfg.AltairForkEpoch + 1000
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlockBellatrix()
|
||||
util.SaveBlock(t, context.Background(), beaconDB, genesis)
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
alphaServer := &validator.Server{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
BlockBuilder: &builderTest.MockBuilderService{},
|
||||
BlockReceiver: c,
|
||||
BlockNotifier: &mock.MockBlockNotifier{},
|
||||
}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
HeadFetcher: c,
|
||||
V1Alpha1ValidatorServer: alphaServer,
|
||||
}
|
||||
req := util.NewBlindedBeaconBlockBellatrix()
|
||||
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
|
||||
req.Block.ParentRoot = bsRoot[:]
|
||||
util.SaveBlock(t, ctx, beaconDB, req)
|
||||
blockSsz, err := req.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: blockSsz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "bellatrix")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err, "Could not propose block correctly")
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
t.Skip("This test needs Capella fork version configured properly")
|
||||
|
||||
// INFO: This code block can be removed once Capella
|
||||
// fork epoch is set to a value other than math.MaxUint64
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.CapellaForkEpoch = cfg.BellatrixForkEpoch + 1000
|
||||
cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.CapellaForkVersion)] = cfg.BellatrixForkEpoch + 1000
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlockCapella()
|
||||
util.SaveBlock(t, context.Background(), beaconDB, genesis)
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
alphaServer := &validator.Server{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
BlockBuilder: &builderTest.MockBuilderService{},
|
||||
BlockReceiver: c,
|
||||
BlockNotifier: &mock.MockBlockNotifier{},
|
||||
}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
HeadFetcher: c,
|
||||
V1Alpha1ValidatorServer: alphaServer,
|
||||
}
|
||||
req := util.NewBlindedBeaconBlockCapella()
|
||||
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
|
||||
req.Block.ParentRoot = bsRoot[:]
|
||||
util.SaveBlock(t, ctx, beaconDB, req)
|
||||
blockSsz, err := req.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: blockSsz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "capella")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err, "Could not propose block correctly")
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubmitBlindedBlock(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlock()
|
||||
util.SaveBlock(t, context.Background(), beaconDB, genesis)
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
}
|
||||
req := util.NewBeaconBlock()
|
||||
req.Block.Slot = 5
|
||||
req.Block.ParentRoot = bsRoot[:]
|
||||
v1Block, err := migration.V1Alpha1ToV1SignedBlock(req)
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, beaconDB, req)
|
||||
blockReq := ðpbv2.SignedBlindedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContainer_Phase0Block{Phase0Block: v1Block.Block},
|
||||
Signature: v1Block.Signature,
|
||||
}
|
||||
_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err, "Could not propose block correctly")
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlockAltair()
|
||||
util.SaveBlock(t, context.Background(), beaconDB, genesis)
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
}
|
||||
req := util.NewBeaconBlockAltair()
|
||||
req.Block.Slot = 5
|
||||
req.Block.ParentRoot = bsRoot[:]
|
||||
v2Block, err := migration.V1Alpha1BeaconBlockAltairToV2(req.Block)
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, beaconDB, req)
|
||||
blockReq := ðpbv2.SignedBlindedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContainer_AltairBlock{AltairBlock: v2Block},
|
||||
Signature: req.Signature,
|
||||
}
|
||||
_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err, "Could not propose block correctly")
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
transactions := [][]byte{[]byte("transaction1"), []byte("transaction2")}
|
||||
transactionsRoot, err := ssz.TransactionsRoot(transactions)
|
||||
require.NoError(t, err)
|
||||
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlockBellatrix()
|
||||
util.SaveBlock(t, context.Background(), beaconDB, genesis)
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
alphaServer := &validator.Server{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
BlockBuilder: &builderTest.MockBuilderService{},
|
||||
BlockReceiver: c,
|
||||
BlockNotifier: &mock.MockBlockNotifier{},
|
||||
}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
V1Alpha1ValidatorServer: alphaServer,
|
||||
}
|
||||
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 5
|
||||
blk.Block.ParentRoot = bsRoot[:]
|
||||
blk.Block.Body.ExecutionPayload.Transactions = transactions
|
||||
blindedBlk := util.NewBlindedBeaconBlockBellatrixV2()
|
||||
blindedBlk.Message.Slot = 5
|
||||
blindedBlk.Message.ParentRoot = bsRoot[:]
|
||||
blindedBlk.Message.Body.ExecutionPayloadHeader.TransactionsRoot = transactionsRoot[:]
|
||||
util.SaveBlock(t, ctx, beaconDB, blk)
|
||||
|
||||
blockReq := ðpbv2.SignedBlindedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: blindedBlk.Message},
|
||||
Signature: blindedBlk.Signature,
|
||||
}
|
||||
_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
transactions := [][]byte{[]byte("transaction1"), []byte("transaction2")}
|
||||
transactionsRoot, err := ssz.TransactionsRoot(transactions)
|
||||
require.NoError(t, err)
|
||||
|
||||
withdrawals := []*enginev1.Withdrawal{
|
||||
{
|
||||
Index: 1,
|
||||
ValidatorIndex: 1,
|
||||
Address: bytesutil.PadTo([]byte("address1"), 20),
|
||||
Amount: 1,
|
||||
},
|
||||
{
|
||||
Index: 2,
|
||||
ValidatorIndex: 2,
|
||||
Address: bytesutil.PadTo([]byte("address2"), 20),
|
||||
Amount: 2,
|
||||
},
|
||||
}
|
||||
withdrawalsRoot, err := ssz.WithdrawalSliceRoot(hash.CustomSHA256Hasher(), withdrawals, 16)
|
||||
require.NoError(t, err)
|
||||
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlockCapella()
|
||||
util.SaveBlock(t, context.Background(), beaconDB, genesis)
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
alphaServer := &validator.Server{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
BlockBuilder: &builderTest.MockBuilderService{},
|
||||
BlockReceiver: c,
|
||||
BlockNotifier: &mock.MockBlockNotifier{},
|
||||
}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
V1Alpha1ValidatorServer: alphaServer,
|
||||
}
|
||||
|
||||
blk := util.NewBeaconBlockCapella()
|
||||
blk.Block.Slot = 5
|
||||
blk.Block.ParentRoot = bsRoot[:]
|
||||
blk.Block.Body.ExecutionPayload.Transactions = transactions
|
||||
blk.Block.Body.ExecutionPayload.Withdrawals = withdrawals
|
||||
blindedBlk := util.NewBlindedBeaconBlockCapellaV2()
|
||||
blindedBlk.Message.Slot = 5
|
||||
blindedBlk.Message.ParentRoot = bsRoot[:]
|
||||
blindedBlk.Message.Body.ExecutionPayloadHeader.TransactionsRoot = transactionsRoot[:]
|
||||
blindedBlk.Message.Body.ExecutionPayloadHeader.WithdrawalsRoot = withdrawalsRoot[:]
|
||||
util.SaveBlock(t, ctx, beaconDB, blk)
|
||||
|
||||
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
|
||||
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock{CapellaBlock: blindedBlk.Message},
|
||||
Signature: blindedBlk.Signature,
|
||||
}
|
||||
_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/network/forks"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
@@ -49,8 +50,6 @@ func TestGetSpec(t *testing.T) {
|
||||
config.AltairForkEpoch = 100
|
||||
config.BellatrixForkVersion = []byte("BellatrixForkVersion")
|
||||
config.BellatrixForkEpoch = 101
|
||||
config.ShardingForkVersion = []byte("ShardingForkVersion")
|
||||
config.ShardingForkEpoch = 102
|
||||
config.CapellaForkVersion = []byte("CapellaForkVersion")
|
||||
config.CapellaForkEpoch = 103
|
||||
config.BLSWithdrawalPrefixByte = byte('b')
|
||||
@@ -138,7 +137,7 @@ func TestGetSpec(t *testing.T) {
|
||||
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 105, len(resp.Data))
|
||||
assert.Equal(t, 103, len(resp.Data))
|
||||
for k, v := range resp.Data {
|
||||
switch k {
|
||||
case "CONFIG_NAME":
|
||||
@@ -203,10 +202,6 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "0x"+hex.EncodeToString([]byte("BellatrixForkVersion")), v)
|
||||
case "BELLATRIX_FORK_EPOCH":
|
||||
assert.Equal(t, "101", v)
|
||||
case "SHARDING_FORK_VERSION":
|
||||
assert.Equal(t, "0x"+hex.EncodeToString([]byte("ShardingForkVersion")), v)
|
||||
case "SHARDING_FORK_EPOCH":
|
||||
assert.Equal(t, "102", v)
|
||||
case "CAPELLA_FORK_VERSION":
|
||||
assert.Equal(t, "0x"+hex.EncodeToString([]byte("CapellaForkVersion")), v)
|
||||
case "CAPELLA_FORK_EPOCH":
|
||||
@@ -425,6 +420,6 @@ func TestForkSchedule_CorrectNumberOfForks(t *testing.T) {
|
||||
s := &Server{}
|
||||
resp, err := s.GetForkSchedule(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
// Genesis and Altair.
|
||||
assert.Equal(t, 3, len(resp.Data))
|
||||
os := forks.NewOrderedSchedule(params.BeaconConfig())
|
||||
assert.Equal(t, os.Len(), len(resp.Data))
|
||||
}
|
||||
|
||||
@@ -334,6 +334,12 @@ func (bs *Server) SubmitSignedBLSToExecutionChanges(ctx context.Context, req *et
|
||||
})
|
||||
continue
|
||||
}
|
||||
bs.OperationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: operation.BLSToExecutionChangeReceived,
|
||||
Data: &operation.BLSToExecutionChangeReceivedData{
|
||||
Change: alphaChange,
|
||||
},
|
||||
})
|
||||
bs.BLSChangesPool.InsertBLSToExecChange(alphaChange)
|
||||
if st.Version() >= version.Capella {
|
||||
if err := bs.Broadcaster.Broadcast(ctx, alphaChange); err != nil {
|
||||
|
||||
@@ -1205,8 +1205,8 @@ func TestSubmitSignedBLSToExecutionChanges_Ok(t *testing.T) {
|
||||
|
||||
spb := &ethpbv1alpha1.BeaconStateCapella{
|
||||
Fork: &ethpbv1alpha1.Fork{
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
},
|
||||
}
|
||||
@@ -1342,8 +1342,8 @@ func TestSubmitSignedBLSToExecutionChanges_Bellatrix(t *testing.T) {
|
||||
|
||||
spc := &ethpbv1alpha1.BeaconStateCapella{
|
||||
Fork: &ethpbv1alpha1.Fork{
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
},
|
||||
}
|
||||
@@ -1405,8 +1405,8 @@ func TestSubmitSignedBLSToExecutionChanges_Failures(t *testing.T) {
|
||||
|
||||
spb := &ethpbv1alpha1.BeaconStateCapella{
|
||||
Fork: &ethpbv1alpha1.Fork{
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -33,6 +33,8 @@ const (
|
||||
ChainReorgTopic = "chain_reorg"
|
||||
// SyncCommitteeContributionTopic represents a new sync committee contribution event topic.
|
||||
SyncCommitteeContributionTopic = "contribution_and_proof"
|
||||
// BLSToExecutionChangeTopic represents a new received BLS to execution change event topic.
|
||||
BLSToExecutionChangeTopic = "bls_to_execution_change"
|
||||
)
|
||||
|
||||
var casesHandled = map[string]bool{
|
||||
@@ -43,6 +45,7 @@ var casesHandled = map[string]bool{
|
||||
FinalizedCheckpointTopic: true,
|
||||
ChainReorgTopic: true,
|
||||
SyncCommitteeContributionTopic: true,
|
||||
BLSToExecutionChangeTopic: true,
|
||||
}
|
||||
|
||||
// StreamEvents allows requesting all events from a set of topics defined in the Ethereum consensus API standard.
|
||||
@@ -178,6 +181,16 @@ func handleBlockOperationEvents(
|
||||
}
|
||||
v2Data := migration.V1Alpha1SignedContributionAndProofToV2(contributionData.Contribution)
|
||||
return streamData(stream, SyncCommitteeContributionTopic, v2Data)
|
||||
case operation.BLSToExecutionChangeReceived:
|
||||
if _, ok := requestedTopics[BLSToExecutionChangeTopic]; !ok {
|
||||
return nil
|
||||
}
|
||||
changeData, ok := event.Data.(*operation.BLSToExecutionChangeReceivedData)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
v2Change := migration.V1Alpha1SignedBLSToExecChangeToV2(changeData.Change)
|
||||
return streamData(stream, BLSToExecutionChangeTopic, v2Change)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -238,6 +238,43 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
|
||||
feed: srv.OperationNotifier.OperationFeed(),
|
||||
})
|
||||
})
|
||||
t.Run(BLSToExecutionChangeTopic, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
srv, ctrl, mockStream := setupServer(ctx, t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
wantedChangeV1alpha1 := &eth.SignedBLSToExecutionChange{
|
||||
Message: &eth.BLSToExecutionChange{
|
||||
ValidatorIndex: 1,
|
||||
FromBlsPubkey: []byte("from"),
|
||||
ToExecutionAddress: []byte("to"),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
wantedChange := migration.V1Alpha1SignedBLSToExecChangeToV2(wantedChangeV1alpha1)
|
||||
genericResponse, err := anypb.New(wantedChange)
|
||||
require.NoError(t, err)
|
||||
|
||||
wantedMessage := &gateway.EventSource{
|
||||
Event: BLSToExecutionChangeTopic,
|
||||
Data: genericResponse,
|
||||
}
|
||||
|
||||
assertFeedSendAndReceive(ctx, &assertFeedArgs{
|
||||
t: t,
|
||||
srv: srv,
|
||||
topics: []string{BLSToExecutionChangeTopic},
|
||||
stream: mockStream,
|
||||
shouldReceive: wantedMessage,
|
||||
itemToSend: &feed.Event{
|
||||
Type: operation.BLSToExecutionChangeReceived,
|
||||
Data: &operation.BLSToExecutionChangeReceivedData{
|
||||
Change: wantedChangeV1alpha1,
|
||||
},
|
||||
},
|
||||
feed: srv.OperationNotifier.OperationFeed(),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestStreamEvents_StateEvents(t *testing.T) {
|
||||
|
||||
@@ -502,7 +502,7 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
|
||||
}
|
||||
b, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, status.Error(codes.Unavailable, "Could not get block from prysm API")
|
||||
}
|
||||
blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(b.GetBlindedBellatrix())
|
||||
if err != nil {
|
||||
|
||||
@@ -689,18 +689,20 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
mockChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:]}
|
||||
mockExecutionChain := &mockExecution.Chain{}
|
||||
v1Alpha1Server := &v1alpha1validator.Server{
|
||||
HeadFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ChainStartFetcher: mockExecutionChain,
|
||||
Eth1InfoFetcher: mockExecutionChain,
|
||||
Eth1BlockFetcher: mockExecutionChain,
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
HeadFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ChainStartFetcher: mockExecutionChain,
|
||||
Eth1InfoFetcher: mockExecutionChain,
|
||||
Eth1BlockFetcher: mockExecutionChain,
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
TimeFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
|
||||
@@ -797,19 +799,21 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
mochChainService := &mockChain.ChainService{State: beaconState, Root: parentRoot[:]}
|
||||
mockExecutionChain := &mockExecution.Chain{}
|
||||
v1Alpha1Server := &v1alpha1validator.Server{
|
||||
HeadFetcher: mochChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mochChainService,
|
||||
HeadUpdater: mochChainService,
|
||||
ChainStartFetcher: mockExecutionChain,
|
||||
Eth1InfoFetcher: mockExecutionChain,
|
||||
Eth1BlockFetcher: mockExecutionChain,
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
HeadFetcher: mochChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mochChainService,
|
||||
HeadUpdater: mochChainService,
|
||||
ChainStartFetcher: mockExecutionChain,
|
||||
Eth1InfoFetcher: mockExecutionChain,
|
||||
Eth1BlockFetcher: mockExecutionChain,
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
TimeFetcher: mochChainService,
|
||||
OptimisticModeFetcher: mochChainService,
|
||||
}
|
||||
|
||||
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
|
||||
@@ -2105,6 +2109,14 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
|
||||
copied := beaconState.Copy()
|
||||
require.NoError(t, copied.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
|
||||
idx, err := helpers.BeaconProposerIndex(ctx, copied)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t,
|
||||
db.SaveRegistrationsByValidatorIDs(ctx, []types.ValidatorIndex{idx},
|
||||
[]*ethpbalpha.ValidatorRegistrationV1{{FeeRecipient: make([]byte, 20), Pubkey: make([]byte, 48)}}))
|
||||
|
||||
req := &ethpbv1.ProduceBlockRequest{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch + 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
|
||||
@@ -6,7 +6,6 @@ go_library(
|
||||
"assignments.go",
|
||||
"attestations.go",
|
||||
"blocks.go",
|
||||
"blstoexec.go",
|
||||
"committees.go",
|
||||
"config.go",
|
||||
"log.go",
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// SubmitBLSToExecutionChange receives a withdrawal credential change object via
|
||||
// RPC and injects it into the beacon node's operations pool.
|
||||
// Submission into this pool does not guarantee inclusion into a beacon block. If the object passes validation
|
||||
// the node MUST broadcast it.
|
||||
func (bs *Server) SubmitBLSToExecutionChange(
|
||||
ctx context.Context,
|
||||
req *ethpb.SignedBLSToExecutionChange,
|
||||
) (*ethpb.BLSToExecutionChangeResponse, error) {
|
||||
bs.BLSChangesPool.InsertBLSToExecChange(req)
|
||||
if err := bs.Broadcaster.Broadcast(ctx, req); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast SignedBLSToExecutionChange object: %v", err)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
@@ -14,6 +14,7 @@ go_library(
|
||||
"proposer_attestations.go",
|
||||
"proposer_bellatrix.go",
|
||||
"proposer_builder.go",
|
||||
"proposer_capella.go",
|
||||
"proposer_deposits.go",
|
||||
"proposer_empty_block.go",
|
||||
"proposer_eth1data.go",
|
||||
@@ -43,14 +44,13 @@ go_library(
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/core/transition/interop:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
@@ -125,6 +125,7 @@ common_deps = [
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
@@ -174,6 +175,7 @@ go_test(
|
||||
"attester_test.go",
|
||||
"blocks_test.go",
|
||||
"exit_test.go",
|
||||
"proposer_altair_test.go",
|
||||
"proposer_attestations_test.go",
|
||||
"proposer_bellatrix_test.go",
|
||||
"proposer_builder_test.go",
|
||||
|
||||
@@ -112,6 +112,48 @@ func TestServer_StreamAltairBlocks_OnHeadUpdated(t *testing.T) {
|
||||
<-exitRoutine
|
||||
}
|
||||
|
||||
func TestServer_StreamCapellaBlocks_OnHeadUpdated(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.BeaconConfig())
|
||||
ctx := context.Background()
|
||||
beaconState, privs := util.DeterministicGenesisStateCapella(t, 64)
|
||||
c, err := altair.NextSyncCommittee(ctx, beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(c))
|
||||
|
||||
b, err := util.GenerateFullBlockCapella(beaconState, privs, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
chainService := &chainMock.ChainService{State: beaconState}
|
||||
server := &Server{
|
||||
Ctx: ctx,
|
||||
BlockNotifier: chainService.BlockNotifier(),
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
exitRoutine := make(chan bool)
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockStream := mock.NewMockBeaconNodeValidatorAltair_StreamBlocksServer(ctrl)
|
||||
|
||||
mockStream.EXPECT().Send(&ethpb.StreamBlocksResponse{Block: &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: b}}).Do(func(arg0 interface{}) {
|
||||
exitRoutine <- true
|
||||
})
|
||||
mockStream.EXPECT().Context().Return(ctx).AnyTimes()
|
||||
|
||||
go func(tt *testing.T) {
|
||||
assert.NoError(tt, server.StreamBlocksAltair(&ethpb.StreamBlocksRequest{}, mockStream), "Could not call RPC method")
|
||||
}(t)
|
||||
wrappedBlk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
for sent := 0; sent == 0; {
|
||||
sent = server.BlockNotifier.BlockFeed().Send(&feed.Event{
|
||||
Type: blockfeed.ReceivedBlock,
|
||||
Data: &blockfeed.ReceivedBlockData{SignedBlock: wrappedBlk},
|
||||
})
|
||||
}
|
||||
<-exitRoutine
|
||||
}
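The busy-wait loops in these streaming tests work because the feed's Send reports how many subscribers received the event, so the test keeps re-sending until the streaming goroutine has subscribed. A minimal standalone sketch of that pattern, assuming go-ethereum's event.Feed (which Prysm's feed package mirrors):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

func main() {
	var feed event.Feed
	ch := make(chan int, 1)
	sub := feed.Subscribe(ch)
	defer sub.Unsubscribe()

	// Send returns the number of subscribers the value was delivered to;
	// looping until it is non-zero waits for at least one subscriber.
	for sent := 0; sent == 0; {
		sent = feed.Send(42)
	}
	fmt.Println(<-ch) // prints 42
}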
|
||||
|
||||
func TestServer_StreamAltairBlocksVerified_OnHeadUpdated(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
@@ -154,3 +196,46 @@ func TestServer_StreamAltairBlocksVerified_OnHeadUpdated(t *testing.T) {
|
||||
}
|
||||
<-exitRoutine
|
||||
}
|
||||
|
||||
func TestServer_StreamCapellaBlocksVerified_OnHeadUpdated(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
beaconState, privs := util.DeterministicGenesisStateCapella(t, 32)
|
||||
c, err := altair.NextSyncCommittee(ctx, beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(c))
|
||||
|
||||
b, err := util.GenerateFullBlockCapella(beaconState, privs, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wrappedBlk := util.SaveBlock(t, ctx, db, b)
|
||||
chainService := &chainMock.ChainService{State: beaconState}
|
||||
server := &Server{
|
||||
Ctx: ctx,
|
||||
StateNotifier: chainService.StateNotifier(),
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
exitRoutine := make(chan bool)
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockStream := mock.NewMockBeaconNodeValidatorAltair_StreamBlocksServer(ctrl)
|
||||
mockStream.EXPECT().Send(&ethpb.StreamBlocksResponse{Block: &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: b}}).Do(func(arg0 interface{}) {
|
||||
exitRoutine <- true
|
||||
})
|
||||
mockStream.EXPECT().Context().Return(ctx).AnyTimes()
|
||||
|
||||
go func(tt *testing.T) {
|
||||
assert.NoError(tt, server.StreamBlocksAltair(&ethpb.StreamBlocksRequest{
|
||||
VerifiedOnly: true,
|
||||
}, mockStream), "Could not call RPC method")
|
||||
}(t)
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
for sent := 0; sent == 0; {
|
||||
sent = server.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{Slot: b.Block.Slot, BlockRoot: r, SignedBlock: wrappedBlk},
|
||||
})
|
||||
}
|
||||
<-exitRoutine
|
||||
}
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/kv"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
|
||||
@@ -50,15 +49,16 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
}
|
||||
|
||||
// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
|
||||
if err := vs.optimisticStatus(ctx); err != nil {
|
||||
return nil, status.Errorf(codes.Unavailable, "Validator is not ready to propose: %v", err)
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
|
||||
if err := vs.optimisticStatus(ctx); err != nil {
|
||||
return nil, status.Errorf(codes.Unavailable, "Validator is not ready to propose: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
sBlk, err := emptyBlockToSign(req.Slot)
|
||||
sBlk, err := getEmptyBlock(req.Slot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare block: %v", err)
|
||||
}
|
||||
|
||||
parentRoot, err := vs.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get head root: %v", err)
|
||||
@@ -82,94 +82,46 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
// Set eth1 data.
|
||||
eth1Data, err := vs.eth1DataMajorityVote(ctx, head)
|
||||
if err != nil {
|
||||
eth1Data = &ethpb.Eth1Data{DepositRoot: params.BeaconConfig().ZeroHash[:], BlockHash: params.BeaconConfig().ZeroHash[:]}
|
||||
log.WithError(err).Error("Could not get eth1data")
|
||||
} else {
|
||||
blk.Body().SetEth1Data(eth1Data)
|
||||
|
||||
// Set deposit and attestation.
|
||||
deposits, atts, err := vs.packDepositsAndAttestations(ctx, head, eth1Data) // TODO: split attestations and deposits
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not pack deposits and attestations")
|
||||
} else {
|
||||
blk.Body().SetDeposits(deposits)
|
||||
blk.Body().SetAttestations(atts)
|
||||
}
|
||||
}
|
||||
blk.Body().SetEth1Data(eth1Data)
|
||||
|
||||
// Set proposer index
|
||||
// Set deposit and attestation.
|
||||
deposits, atts, err := vs.packDepositsAndAttestations(ctx, head, eth1Data) // TODO: split attestations and deposits
|
||||
if err != nil {
|
||||
blk.Body().SetDeposits([]*ethpb.Deposit{})
|
||||
blk.Body().SetAttestations([]*ethpb.Attestation{})
|
||||
log.WithError(err).Error("Could not pack deposits and attestations")
|
||||
}
|
||||
blk.Body().SetDeposits(deposits)
|
||||
blk.Body().SetAttestations(atts)
|
||||
|
||||
// Set proposer index.
|
||||
idx, err := helpers.BeaconProposerIndex(ctx, head)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not calculate proposer index %v", err)
|
||||
}
|
||||
blk.SetProposerIndex(idx)
|
||||
|
||||
// Set slashings
|
||||
// Set slashings.
|
||||
validProposerSlashings, validAttSlashings := vs.getSlashings(ctx, head)
|
||||
blk.Body().SetProposerSlashings(validProposerSlashings)
|
||||
blk.Body().SetAttesterSlashings(validAttSlashings)
|
||||
|
||||
// Set exits
|
||||
// Set exits.
|
||||
blk.Body().SetVoluntaryExits(vs.getExits(head, req.Slot))
|
||||
|
||||
// Set sync aggregate. New in Altair.
|
||||
if req.Slot > 0 && slots.ToEpoch(req.Slot) >= params.BeaconConfig().AltairForkEpoch {
|
||||
syncAggregate, err := vs.getSyncAggregate(ctx, req.Slot-1, bytesutil.ToBytes32(parentRoot))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get sync aggregate")
|
||||
} else {
|
||||
if err := blk.Body().SetSyncAggregate(syncAggregate); err != nil {
|
||||
log.WithError(err).Error("Could not set sync aggregate")
|
||||
if err := blk.Body().SetSyncAggregate(&ethpb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, params.BeaconConfig().SyncCommitteeSize),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
}); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not set default sync aggregate: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
vs.setSyncAggregate(ctx, blk)
|
||||
|
||||
// Set execution data. New in Bellatrix.
|
||||
if err := vs.setExecutionData(ctx, blk, head); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
|
||||
}
|
||||
|
||||
// Set execution data. New in Bellatrix
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
|
||||
fallBackToLocal := true
|
||||
canUseBuilder, err := vs.canUseBuilder(ctx, req.Slot, idx)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to check if builder can be used")
|
||||
} else if canUseBuilder {
|
||||
h, err := vs.getPayloadHeaderFromBuilder(ctx, req.Slot, idx)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to get payload header from builder")
|
||||
} else {
|
||||
blk.SetBlinded(true)
|
||||
if err := blk.Body().SetExecution(h); err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to set execution payload")
|
||||
} else {
|
||||
fallBackToLocal = false
|
||||
}
|
||||
}
|
||||
}
|
||||
if fallBackToLocal {
|
||||
executionData, err := vs.getExecutionPayload(ctx, req.Slot, idx, bytesutil.ToBytes32(parentRoot), head)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get execution payload: %v", err)
|
||||
}
|
||||
if err := blk.Body().SetExecution(executionData); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not set execution payload: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Set bls to execution change. New in Capella
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().CapellaForkEpoch {
|
||||
changes, err := vs.BLSChangesPool.BLSToExecChangesForInclusion(head)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get bls to execution changes")
|
||||
} else {
|
||||
if err := blk.Body().SetBLSToExecutionChanges(changes); err != nil {
|
||||
log.WithError(err).Error("Could not set bls to execution changes")
|
||||
}
|
||||
}
|
||||
}
|
||||
// Set bls to execution change. New in Capella.
|
||||
vs.setBlsToExecData(blk, head)
|
||||
|
||||
sr, err := vs.computeStateRoot(ctx, sBlk)
|
||||
if err != nil {
|
||||
@@ -183,42 +135,19 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
}
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().CapellaForkEpoch {
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Capella{Capella: pb.(*ethpb.BeaconBlockCapella)}}, nil
|
||||
} else if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
|
||||
}
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch && !blk.IsBlinded() {
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Bellatrix{Bellatrix: pb.(*ethpb.BeaconBlockBellatrix)}}, nil
|
||||
} else if slots.ToEpoch(req.Slot) >= params.BeaconConfig().AltairForkEpoch {
|
||||
}
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch && blk.IsBlinded() {
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: pb.(*ethpb.BlindedBeaconBlockBellatrix)}}, nil
|
||||
}
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().AltairForkEpoch {
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Altair{Altair: pb.(*ethpb.BeaconBlockAltair)}}, nil
|
||||
}
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Phase0{Phase0: pb.(*ethpb.BeaconBlock)}}, nil
|
||||
}
|
||||
|
||||
func emptyBlockToSign(slot types.Slot) (interfaces.SignedBeaconBlock, error) {
|
||||
var sBlk interfaces.SignedBeaconBlock
|
||||
var err error
|
||||
switch {
|
||||
case slots.ToEpoch(slot) < params.BeaconConfig().AltairForkEpoch:
|
||||
sBlk, err = blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
|
||||
}
|
||||
case slots.ToEpoch(slot) < params.BeaconConfig().BellatrixForkEpoch:
|
||||
sBlk, err = blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{Body: &ethpb.BeaconBlockBodyAltair{}}})
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
|
||||
}
|
||||
case slots.ToEpoch(slot) < params.BeaconConfig().CapellaForkEpoch:
|
||||
sBlk, err = blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{Body: &ethpb.BeaconBlockBodyBellatrix{}}})
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
|
||||
}
|
||||
default:
|
||||
sBlk, err = blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockCapella{Block: &ethpb.BeaconBlockCapella{Body: &ethpb.BeaconBlockBodyCapella{}}})
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
|
||||
}
|
||||
}
|
||||
return sBlk, err
|
||||
}
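The GenericBeaconBlock wrapping above, like the per-fork empty block construction, simply checks fork epochs from newest to oldest. A hedged standalone sketch of that ordering, with illustrative names rather than Prysm's actual API:

package main

import "fmt"

// forkForEpoch picks the newest fork whose activation epoch has been reached.
// The thresholds stand in for AltairForkEpoch, BellatrixForkEpoch and CapellaForkEpoch.
func forkForEpoch(epoch, altair, bellatrix, capella uint64) string {
	switch {
	case epoch >= capella:
		return "capella"
	case epoch >= bellatrix:
		return "bellatrix"
	case epoch >= altair:
		return "altair"
	default:
		return "phase0"
	}
}

func main() {
	fmt.Println(forkForEpoch(102, 100, 101, 103)) // bellatrix
}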
|
||||
|
||||
// ProposeBeaconBlock is called by a proposer during its assigned slot to create a block in an attempt
|
||||
// to get it processed by the beacon node as the canonical head.
|
||||
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
|
||||
@@ -3,21 +3,52 @@ package validator
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
synccontribution "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/attestation/aggregation/sync_contribution"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/version"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
func (vs *Server) setSyncAggregate(ctx context.Context, blk interfaces.BeaconBlock) {
|
||||
if blk.Version() < version.Altair {
|
||||
return
|
||||
}
|
||||
|
||||
syncAggregate, err := vs.getSyncAggregate(ctx, blk.Slot()-1, blk.ParentRoot())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get sync aggregate")
|
||||
emptySig := [96]byte{0xC0}
|
||||
emptyAggregate := &ethpb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, params.BeaconConfig().SyncCommitteeSize),
|
||||
SyncCommitteeSignature: emptySig[:],
|
||||
}
|
||||
if err := blk.Body().SetSyncAggregate(emptyAggregate); err != nil {
|
||||
log.WithError(err).Error("Could not set sync aggregate")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Can not error. We already filter block versioning at the top. Phase 0 is impossible.
|
||||
if err := blk.Body().SetSyncAggregate(syncAggregate); err != nil {
|
||||
log.WithError(err).Error("Could not set sync aggregate")
|
||||
}
|
||||
}
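The 0xC0 leading byte in the fallback above is the compressed point-at-infinity encoding that represents an empty BLS aggregate signature. A hedged sketch of constructing that default aggregate outside Prysm's helpers (the lengths are assumptions, not values read from params.BeaconConfig):

package main

import "fmt"

// emptySyncAggregate builds a sync aggregate with no participants: a zeroed
// bit vector and a signature encoding the BLS point at infinity.
func emptySyncAggregate(bitsLen int) (bits, sig []byte) {
	bits = make([]byte, bitsLen) // no participation bits set
	sig = make([]byte, 96)       // compressed G2 signature length
	sig[0] = 0xC0                // infinity marker: empty aggregate
	return bits, sig
}

func main() {
	bits, sig := emptySyncAggregate(64) // 64 bytes of bits is illustrative only
	fmt.Printf("bits=%d sig[0]=%#x\n", len(bits), sig[0])
}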
|
||||
|
||||
// getSyncAggregate retrieves the sync contributions from the pool to construct the sync aggregate object.
|
||||
// The contributions are filtered by matching the input root and slot, then by profitability.
|
||||
func (vs *Server) getSyncAggregate(ctx context.Context, slot types.Slot, root [32]byte) (*ethpb.SyncAggregate, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.getSyncAggregate")
|
||||
defer span.End()
|
||||
|
||||
if vs.SyncCommitteePool == nil {
|
||||
return nil, errors.New("sync committee pool is nil")
|
||||
}
|
||||
// Contributions have to match the input root
|
||||
contributions, err := vs.SyncCommitteePool.SyncCommitteeContributions(slot)
|
||||
if err != nil {
|
||||
|
||||
@@ -0,0 +1,28 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/util"
|
||||
)
|
||||
|
||||
func TestServer_SetSyncAggregate_EmptyCase(t *testing.T) {
|
||||
b, err := blocks.NewBeaconBlock(util.NewBeaconBlockAltair().Block)
|
||||
require.NoError(t, err)
|
||||
s := &Server{} // Server is not initialized with a sync committee pool.
|
||||
s.setSyncAggregate(context.Background(), b)
|
||||
agg, err := b.Body().SyncAggregate()
|
||||
require.NoError(t, err)
|
||||
|
||||
emptySig := [96]byte{0xC0}
|
||||
want := &ethpb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, params.BeaconConfig().SyncCommitteeSize),
|
||||
SyncCommitteeSignature: emptySig[:],
|
||||
}
|
||||
require.DeepEqual(t, want, agg)
|
||||
}
|
||||
@@ -80,7 +80,7 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
|
||||
// filter separates attestation list into two groups: valid and invalid attestations.
|
||||
// The first group passes all the required checks for an attestation to be considered for proposing.
|
||||
// And attestations from the second group should be deleted.
|
||||
func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (proposerAtts, proposerAtts, error) {
|
||||
func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (proposerAtts, proposerAtts) {
|
||||
validAtts := make([]*ethpb.Attestation, 0, len(a))
|
||||
invalidAtts := make([]*ethpb.Attestation, 0, len(a))
|
||||
var attestationProcessor func(context.Context, state.BeaconState, *ethpb.Attestation) (state.BeaconState, error)
|
||||
@@ -98,7 +98,7 @@ func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (propose
|
||||
}
|
||||
} else {
|
||||
// Exit early if there is an unknown state type.
|
||||
return validAtts, invalidAtts, errors.Errorf("unknown state type: %v", st.Version())
|
||||
return validAtts, invalidAtts
|
||||
}
|
||||
|
||||
for _, att := range a {
|
||||
@@ -108,7 +108,7 @@ func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (propose
|
||||
}
|
||||
invalidAtts = append(invalidAtts, att)
|
||||
}
|
||||
return validAtts, invalidAtts, nil
|
||||
return validAtts, invalidAtts
|
||||
}
|
||||
|
||||
// sortByProfitability orders attestations by highest slot and by highest aggregation bit count.
|
||||
@@ -247,10 +247,7 @@ func (vs *Server) validateAndDeleteAttsInPool(ctx context.Context, st state.Beac
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.validateAndDeleteAttsInPool")
|
||||
defer span.End()
|
||||
|
||||
validAtts, invalidAtts, err := proposerAtts(atts).filter(ctx, st)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
validAtts, invalidAtts := proposerAtts(atts).filter(ctx, st)
|
||||
if err := vs.deleteAttsInPool(ctx, invalidAtts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -12,9 +11,9 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
coreBlock "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
@@ -36,6 +35,38 @@ var builderGetPayloadMissCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
// block request. This value is known as `BUILDER_PROPOSAL_DELAY_TOLERANCE` in builder spec.
|
||||
const blockBuilderTimeout = 1 * time.Second
|
||||
|
||||
// Sets the execution data for the block. Execution data can come from the local EL client or a remote builder, depending on validator registration and circuit breaker conditions.
|
||||
func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.BeaconBlock, headState state.BeaconState) error {
|
||||
idx := blk.ProposerIndex()
|
||||
slot := blk.Slot()
|
||||
if slots.ToEpoch(slot) < params.BeaconConfig().BellatrixForkEpoch {
|
||||
return nil
|
||||
}
|
||||
|
||||
canUseBuilder, err := vs.canUseBuilder(ctx, slot, idx)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to check if builder can be used")
|
||||
} else if canUseBuilder {
|
||||
h, err := vs.getPayloadHeaderFromBuilder(ctx, slot, idx)
|
||||
if err != nil {
|
||||
builderGetPayloadMissCount.Inc()
|
||||
log.WithError(err).Warn("Proposer: failed to get payload header from builder")
|
||||
} else {
|
||||
blk.SetBlinded(true)
|
||||
if err := blk.Body().SetExecution(h); err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to set execution payload")
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
executionData, err := vs.getExecutionPayload(ctx, slot, idx, blk.ParentRoot(), headState)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get execution payload")
|
||||
}
|
||||
return blk.Body().SetExecution(executionData)
|
||||
}
|
||||
|
||||
// This function retrieves the payload header given the slot number and the validator index.
|
||||
// It's a no-op if the latest head block is not versioned bellatrix.
|
||||
func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Slot, idx types.ValidatorIndex) (interfaces.ExecutionData, error) {
|
||||
@@ -55,6 +86,10 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, blockBuilderTimeout)
|
||||
defer cancel()
|
||||
|
||||
bid, err := vs.BlockBuilder.GetHeader(ctx, slot, bytesutil.ToBytes32(h.BlockHash()), pk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -63,7 +98,7 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
|
||||
return nil, errors.New("builder returned nil bid")
|
||||
}
|
||||
|
||||
v := new(big.Int).SetBytes(bytesutil.ReverseByteOrder(bid.Message.Value))
|
||||
v := bytesutil.LittleEndianBytesToBigInt(bid.Message.Value)
|
||||
if v.String() == "0" {
|
||||
return nil, errors.New("builder returned header with 0 bid amount")
|
||||
}
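For reference, the bytesutil.LittleEndianBytesToBigInt call above is equivalent to what the replaced inline code did: reverse the little-endian bid value and feed it to big.Int.SetBytes. A minimal standalone sketch of that conversion (not Prysm's implementation):

package main

import (
	"fmt"
	"math/big"
)

// littleEndianToBigInt interprets b as a little-endian unsigned integer.
func littleEndianToBigInt(b []byte) *big.Int {
	rev := make([]byte, len(b))
	for i, v := range b {
		rev[len(b)-1-i] = v
	}
	return new(big.Int).SetBytes(rev)
}

func main() {
	fmt.Println(littleEndianToBigInt([]byte{0x00, 0x01})) // prints 256
}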
|
||||
@@ -89,7 +124,7 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
|
||||
return nil, fmt.Errorf("incorrect timestamp %d != %d", bid.Message.Header.Timestamp, uint64(t.Unix()))
|
||||
}
|
||||
|
||||
if err := vs.validateBuilderSignature(bid); err != nil {
|
||||
if err := validateBuilderSignature(bid); err != nil {
|
||||
return nil, errors.Wrap(err, "could not validate builder signature")
|
||||
}
|
||||
|
||||
@@ -98,15 +133,14 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
|
||||
"builderPubKey": fmt.Sprintf("%#x", bid.Message.Pubkey),
|
||||
"blockHash": fmt.Sprintf("%#x", bid.Message.Header.BlockHash),
|
||||
}).Info("Received header with bid")
|
||||
|
||||
return coreBlock.WrappedExecutionPayloadHeader(bid.Message.Header)
|
||||
return consensusblocks.WrappedExecutionPayloadHeader(bid.Message.Header)
|
||||
}
|
||||
|
||||
// This function retrieves the full payload block using the input blind block. This input must be versioned as
|
||||
// bellatrix blind block. The output block will contain the full payload. The original header block
|
||||
// will be returned if the block builder is not configured.
|
||||
func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
|
||||
if err := coreBlock.BeaconBlockIsNil(b); err != nil {
|
||||
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -213,26 +247,8 @@ func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.SignedBe
|
||||
return wb, nil
|
||||
}
|
||||
|
||||
// readyForBuilder returns true if builder is allowed to be used. Builder is only allowed to be used after the
|
||||
// first finalized checkpoint has been execution-enabled.
|
||||
func (vs *Server) readyForBuilder(ctx context.Context) (bool, error) {
|
||||
cp := vs.FinalizationFetcher.FinalizedCheckpt()
|
||||
// Checkpoint root is zero means we are still at genesis epoch.
|
||||
if bytesutil.ToBytes32(cp.Root) == params.BeaconConfig().ZeroHash {
|
||||
return false, nil
|
||||
}
|
||||
b, err := vs.BeaconDB.Block(ctx, bytesutil.ToBytes32(cp.Root))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err = consensusblocks.BeaconBlockIsNil(b); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return blocks.IsExecutionBlock(b.Block().Body())
|
||||
}
|
||||
|
||||
// Validates builder signature and returns an error if the signature is invalid.
|
||||
func (vs *Server) validateBuilderSignature(bid *ethpb.SignedBuilderBid) error {
|
||||
func validateBuilderSignature(bid *ethpb.SignedBuilderBid) error {
|
||||
d, err := signing.ComputeDomain(params.BeaconConfig().DomainApplicationBuilder,
|
||||
nil, /* fork version */
|
||||
nil /* genesis val root */)
|
||||
|
||||
@@ -5,29 +5,14 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
blockchainTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
|
||||
builderTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/builder/testing"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
|
||||
prysmtime "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
|
||||
dbTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync/initial-sync/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
|
||||
@@ -36,7 +21,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestServer_getPayloadHeader(t *testing.T) {
|
||||
@@ -174,7 +158,11 @@ func TestServer_getPayloadHeader(t *testing.T) {
|
||||
require.ErrorContains(t, tc.err, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, tc.returnedHeader, h)
|
||||
if tc.returnedHeader != nil {
|
||||
want, err := blocks.WrappedExecutionPayloadHeader(tc.returnedHeader)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, h)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -319,406 +307,6 @@ func TestServer_getBuilderBlock(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_readyForBuilder(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
vs := &Server{BeaconDB: dbTest.SetupDB(t)}
|
||||
cs := &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{}} // Checkpoint root is zeros.
|
||||
vs.FinalizationFetcher = cs
|
||||
ready, err := vs.readyForBuilder(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ready)
|
||||
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
wb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
wbr, err := wb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
b1 := util.NewBeaconBlockBellatrix()
|
||||
b1.Block.Body.ExecutionPayload.BlockNumber = 1 // Execution enabled.
|
||||
wb1, err := blocks.NewSignedBeaconBlock(b1)
|
||||
require.NoError(t, err)
|
||||
wbr1, err := wb1.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, vs.BeaconDB.SaveBlock(ctx, wb))
|
||||
require.NoError(t, vs.BeaconDB.SaveBlock(ctx, wb1))
|
||||
|
||||
// Ready is false given finalized block does not have execution.
|
||||
cs = &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: wbr[:]}}
|
||||
vs.FinalizationFetcher = cs
|
||||
ready, err = vs.readyForBuilder(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ready)
|
||||
|
||||
// Ready is true given finalized block has execution.
|
||||
cs = &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: wbr1[:]}}
|
||||
vs.FinalizationFetcher = cs
|
||||
ready, err = vs.readyForBuilder(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ready)
|
||||
}
|
||||
|
||||
func TestServer_GetBellatrixBeaconBlock_HappyCase(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
terminalBlockHash := bytesutil.PadTo([]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 32)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.AltairForkEpoch = 1
|
||||
cfg.TerminalBlockHash = common.BytesToHash(terminalBlockHash)
|
||||
cfg.TerminalBlockHashActivationEpoch = 2
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := consensusblocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := blocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
emptyPayload := &v1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
}
|
||||
var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
|
||||
Block: &ethpb.BeaconBlockBellatrix{
|
||||
Slot: bellatrixSlot + 1,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
|
||||
ExecutionPayload: emptyPayload,
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
|
||||
blkRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")
|
||||
|
||||
proposerServer := &Server{
|
||||
HeadFetcher: &blockchainTest.ChainService{State: beaconState, Root: parentRoot[:], Optimistic: false},
|
||||
TimeFetcher: &blockchainTest.ChainService{Genesis: time.Now()},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &blockchainTest.ChainService{},
|
||||
HeadUpdater: &blockchainTest.ChainService{},
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
ExecutionEngineCaller: &mockExecution.EngineClient{
|
||||
PayloadIDBytes: &v1.PayloadIDBytes{1},
|
||||
ExecutionPayload: emptyPayload,
|
||||
},
|
||||
BeaconDB: db,
|
||||
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
|
||||
BlockBuilder: &builderTest.MockBuilderService{},
|
||||
}
|
||||
proposerServer.ProposerSlotIndexCache.SetProposerAndPayloadIDs(17, 11, [8]byte{'a'}, parentRoot)
|
||||
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
block, err := proposerServer.getBellatrixBeaconBlock(ctx, &ethpb.BlockRequest{
|
||||
Slot: bellatrixSlot + 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
bellatrixBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Bellatrix)
|
||||
require.Equal(t, true, ok)
|
||||
require.LogsContain(t, hook, "Computed state root")
|
||||
require.DeepEqual(t, emptyPayload, bellatrixBlk.Bellatrix.Body.ExecutionPayload) // Payload should equal.
|
||||
}
|
||||
|
||||
func TestServer_GetBellatrixBeaconBlock_LocalProgressingWithBuilderSkipped(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
terminalBlockHash := bytesutil.PadTo([]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 32)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.AltairForkEpoch = 1
|
||||
cfg.TerminalBlockHash = common.BytesToHash(terminalBlockHash)
|
||||
cfg.TerminalBlockHashActivationEpoch = 2
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := consensusblocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := blocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
emptyPayload := &v1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
}
|
||||
var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
|
||||
Block: &ethpb.BeaconBlockBellatrix{
|
||||
Slot: bellatrixSlot + 1,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
|
||||
ExecutionPayload: emptyPayload,
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
|
||||
blkRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")
|
||||
|
||||
proposerServer := &Server{
|
||||
HeadFetcher: &blockchainTest.ChainService{State: beaconState, Root: parentRoot[:], Optimistic: false},
|
||||
TimeFetcher: &blockchainTest.ChainService{Genesis: time.Now()},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &blockchainTest.ChainService{},
|
||||
HeadUpdater: &blockchainTest.ChainService{},
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
ExecutionEngineCaller: &mockExecution.EngineClient{
|
||||
PayloadIDBytes: &v1.PayloadIDBytes{1},
|
||||
ExecutionPayload: emptyPayload,
|
||||
},
|
||||
BeaconDB: db,
|
||||
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
|
||||
BlockBuilder: &builderTest.MockBuilderService{},
|
||||
}
|
||||
proposerServer.ProposerSlotIndexCache.SetProposerAndPayloadIDs(17, 11, [8]byte{'a'}, parentRoot)
|
||||
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Configure the builder; this should fail if it is not local engine processing.
|
||||
proposerServer.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true, ErrGetHeader: errors.New("bad")}
|
||||
block, err := proposerServer.getBellatrixBeaconBlock(ctx, &ethpb.BlockRequest{
|
||||
Slot: bellatrixSlot + 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
SkipMevBoost: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
bellatrixBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Bellatrix)
|
||||
require.Equal(t, true, ok)
|
||||
require.LogsContain(t, hook, "Computed state root")
|
||||
require.DeepEqual(t, emptyPayload, bellatrixBlk.Bellatrix.Body.ExecutionPayload) // Payload should equal.
|
||||
}
|
||||
|
||||
func TestServer_GetBellatrixBeaconBlock_BuilderCase(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := consensusblocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := blocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
emptyPayload := &v1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
}
|
||||
var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
|
||||
Block: &ethpb.BeaconBlockBellatrix{
|
||||
Slot: bellatrixSlot + 1,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
|
||||
ExecutionPayload: emptyPayload,
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
|
||||
blkRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")
|
||||
|
||||
b1 := util.NewBeaconBlockBellatrix()
|
||||
b1.Block.Body.ExecutionPayload.BlockNumber = 1 // Execution enabled.
|
||||
wb1, err := blocks.NewSignedBeaconBlock(b1)
|
||||
require.NoError(t, err)
|
||||
wbr1, err := wb1.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wb1))
|
||||
|
||||
random, err := helpers.RandaoMix(beaconState, prysmtime.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
|
||||
tstamp, err := slots.ToTime(beaconState.GenesisTime(), bellatrixSlot+1)
|
||||
require.NoError(t, err)
|
||||
h := &v1.ExecutionPayloadHeader{
|
||||
BlockNumber: 123,
|
||||
GasLimit: 456,
|
||||
GasUsed: 789,
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: random,
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
Timestamp: uint64(tstamp.Unix()),
|
||||
}
|
||||
|
||||
proposerServer := &Server{
|
||||
FinalizationFetcher: &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: wbr1[:]}},
|
||||
HeadFetcher: &blockchainTest.ChainService{State: beaconState, Root: parentRoot[:], Optimistic: false, Block: wb1},
|
||||
TimeFetcher: &blockchainTest.ChainService{Genesis: time.Unix(int64(beaconState.GenesisTime()), 0)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &blockchainTest.ChainService{},
|
||||
HeadUpdater: &blockchainTest.ChainService{},
|
||||
ForkFetcher: &blockchainTest.ChainService{Fork: &ethpb.Fork{}},
|
||||
GenesisFetcher: &blockchainTest.ChainService{},
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
ExecutionEngineCaller: &mockExecution.EngineClient{
|
||||
PayloadIDBytes: &v1.PayloadIDBytes{1},
|
||||
ExecutionPayload: emptyPayload,
|
||||
},
|
||||
BeaconDB: db,
|
||||
}
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
bid := &ethpb.BuilderBid{
|
||||
Header: h,
|
||||
Pubkey: sk.PublicKey().Marshal(),
|
||||
Value: bytesutil.PadTo([]byte{1, 2, 3}, 32),
|
||||
}
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
domain, err := signing.ComputeDomain(d, nil, nil)
|
||||
require.NoError(t, err)
|
||||
sr, err := signing.ComputeSigningRoot(bid, domain)
|
||||
require.NoError(t, err)
|
||||
sBid := &ethpb.SignedBuilderBid{
|
||||
Message: bid,
|
||||
Signature: sk.Sign(sr[:]).Marshal(),
|
||||
}
|
||||
proposerServer.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true, Bid: sBid}
|
||||
proposerServer.ForkFetcher = &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New()}
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, proposerServer.BeaconDB.SaveRegistrationsByValidatorIDs(ctx, []types.ValidatorIndex{11},
|
||||
[]*ethpb.ValidatorRegistrationV1{{FeeRecipient: bytesutil.PadTo([]byte{}, fieldparams.FeeRecipientLength), Pubkey: bytesutil.PadTo([]byte{}, fieldparams.BLSPubkeyLength)}}))
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg.MaxBuilderConsecutiveMissedSlots = bellatrixSlot + 1
|
||||
cfg.MaxBuilderEpochMissedSlots = 32
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
block, err := proposerServer.getBellatrixBeaconBlock(ctx, &ethpb.BlockRequest{
|
||||
Slot: bellatrixSlot + 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
bellatrixBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_BlindedBellatrix)
|
||||
require.Equal(t, true, ok)
|
||||
require.LogsContain(t, hook, "Computed state root")
|
||||
require.DeepEqual(t, h, bellatrixBlk.BlindedBellatrix.Body.ExecutionPayloadHeader) // Payload header should equal.
|
||||
}
|
||||
|
||||
func TestServer_validateBuilderSignature(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -58,7 +58,7 @@ func (vs *Server) circuitBreakBuilder(s types.Slot) (bool, error) {
"currentSlot": s,
"highestReceivedSlot": highestReceivedSlot,
"maxConsecutiveSkipSlotsAllowed": maxConsecutiveSkipSlotsAllowed,
}).Warn("Builder circuit breaker activated due to missing consecutive slot")
}).Warn("Circuit breaker activated due to missing consecutive slot. Ignore if mev-boost is not used")
return true, nil
}

@@ -81,7 +81,7 @@ func (vs *Server) circuitBreakBuilder(s types.Slot) (bool, error) {
log.WithFields(logrus.Fields{
"totalMissed": diff,
"maxEpochSkipSlotsAllowed": maxEpochSkipSlotsAllowed,
}).Warn("Builder circuit breaker activated due to missing enough slots last epoch")
}).Warn("Circuit breaker activated due to missing enough slots last epoch. Ignore if mev-boost is not used")
return true, nil
}

@@ -34,7 +34,7 @@ func TestServer_circuitBreakBuilder(t *testing.T) {
err,
)
require.Equal(t, true, b)
require.LogsContain(t, hook, "Builder circuit breaker activated due to missing consecutive slot")
require.LogsContain(t, hook, "Circuit breaker activated due to missing consecutive slot. Ignore if mev-boost is not used")

ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -56,7 +56,7 @@ func TestServer_circuitBreakBuilder(t *testing.T) {
b, err = s.circuitBreakBuilder(params.BeaconConfig().SlotsPerEpoch + 1)
require.NoError(t, err)
require.Equal(t, true, b)
require.LogsContain(t, hook, "Builder circuit breaker activated due to missing enough slots last epoch")
require.LogsContain(t, hook, "Circuit breaker activated due to missing enough slots last epoch. Ignore if mev-boost is not used")

want := params.BeaconConfig().SlotsPerEpoch - params.BeaconConfig().MaxBuilderEpochMissedSlots
for i := types.Slot(2); i <= want+2; i++ {

@@ -0,0 +1,29 @@
package validator

import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
)

// Sets the bls to exec data for a block.
func (vs *Server) setBlsToExecData(blk interfaces.BeaconBlock, headState state.BeaconState) {
if blk.Version() < version.Capella {
return
}
if err := blk.Body().SetBLSToExecutionChanges([]*ethpb.SignedBLSToExecutionChange{}); err != nil {
log.WithError(err).Error("Could not set bls to execution data in block")
return
}
changes, err := vs.BLSChangesPool.BLSToExecChangesForInclusion(headState)
if err != nil {
log.WithError(err).Error("Could not get bls to execution changes")
return
} else {
if err := blk.Body().SetBLSToExecutionChanges(changes); err != nil {
log.WithError(err).Error("Could not set bls to execution changes")
return
}
}
}
@@ -40,14 +40,9 @@ var (
})
)

// This returns the execution payload of a given slot.
// The function has full awareness of pre and post merge.
// This returns the execution payload of a given slot. The function has full awareness of pre and post merge.
// The payload is computed given the respected time of merge.
func (vs *Server) getExecutionPayload(ctx context.Context,
slot types.Slot,
vIdx types.ValidatorIndex,
headRoot [32]byte,
st state.BeaconState) (interfaces.ExecutionData, error) {
func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx types.ValidatorIndex, headRoot [32]byte, st state.BeaconState) (interfaces.ExecutionData, error) {
proposerID, payloadId, ok := vs.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, headRoot)
feeRecipient := params.BeaconConfig().DefaultFeeRecipient
recipient, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, vIdx)
@@ -77,7 +72,7 @@ func (vs *Server) getExecutionPayload(ctx context.Context,
payload, err := vs.ExecutionEngineCaller.GetPayload(ctx, pid, slot)
switch {
case err == nil:
warnIfFeeRecipientDiffers(payload.FeeRecipient(), feeRecipient)
warnIfFeeRecipientDiffers(payload, feeRecipient)
return payload, nil
case errors.Is(err, context.DeadlineExceeded):
default:
@@ -104,14 +99,14 @@ func (vs *Server) getExecutionPayload(ctx context.Context,
parentHash = header.BlockHash()
} else {
if activationEpochNotReached(slot) {
return emptyPayload()
return consensusblocks.WrappedExecutionPayload(emptyPayload())
}
parentHash, hasTerminalBlock, err = vs.getTerminalBlockHashIfExists(ctx, uint64(t.Unix()))
if err != nil {
return nil, err
}
if !hasTerminalBlock {
return emptyPayload()
return consensusblocks.WrappedExecutionPayload(emptyPayload())
}
}
payloadIDCacheMiss.Inc()
@@ -147,15 +142,18 @@ func (vs *Server) getExecutionPayload(ctx context.Context,
FinalizedBlockHash: finalizedBlockHash,
}

p, err := payloadattribute.New(&enginev1.PayloadAttributes{
p := &enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: random,
SuggestedFeeRecipient: feeRecipient.Bytes(),
})
}

// This will change in subsequent hardforks like Capella.
pa, err := payloadattribute.New(p)
if err != nil {
return nil, err
}
payloadID, _, err := vs.ExecutionEngineCaller.ForkchoiceUpdated(ctx, f, p)
payloadID, _, err := vs.ExecutionEngineCaller.ForkchoiceUpdated(ctx, f, pa)
if err != nil {
return nil, errors.Wrap(err, "could not prepare payload")
}
@@ -166,18 +164,18 @@ func (vs *Server) getExecutionPayload(ctx context.Context,
if err != nil {
return nil, err
}
warnIfFeeRecipientDiffers(payload.FeeRecipient(), feeRecipient)
warnIfFeeRecipientDiffers(payload, feeRecipient)
return payload, nil
}

// warnIfFeeRecipientDiffers logs a warning if the fee recipient in the included payload does not
// match the requested one.
func warnIfFeeRecipientDiffers(payloadRecipient []byte, feeRecipient common.Address) {
func warnIfFeeRecipientDiffers(payload interfaces.ExecutionData, feeRecipient common.Address) {
// Warn if the fee recipient is not the value we expect.
if !bytes.Equal(payloadRecipient, feeRecipient[:]) {
if payload != nil && !bytes.Equal(payload.FeeRecipient(), feeRecipient[:]) {
logrus.WithFields(logrus.Fields{
"wantedFeeRecipient": fmt.Sprintf("%#x", feeRecipient),
"received": fmt.Sprintf("%#x", payloadRecipient),
"received": fmt.Sprintf("%#x", payload.FeeRecipient()),
}).Warn("Fee recipient address from execution client is not what was expected. " +
"It is possible someone has compromised your client to try and take your transaction fees")
}
@@ -229,8 +227,8 @@ func activationEpochNotReached(slot types.Slot) bool {
return false
}

func emptyPayload() (interfaces.ExecutionData, error) {
return consensusblocks.WrappedExecutionPayload(&enginev1.ExecutionPayload{
func emptyPayload() *enginev1.ExecutionPayload {
return &enginev1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
@@ -239,5 +237,5 @@ func emptyPayload() (interfaces.ExecutionData, error) {
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
})
}
}

@@ -124,7 +124,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(tt.st.Slot(), 100, [8]byte{100}, [32]byte{'a'})
_, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx, [32]byte{'a'})
_, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx, [32]byte{'a'}, tt.st)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
@@ -160,7 +160,7 @@ func TestServer_getExecutionPayloadContextTimeout(t *testing.T) {
}
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nonTransitionSt.Slot(), 100, [8]byte{100}, [32]byte{'a'})

_, err = vs.getExecutionPayload(context.Background(), nonTransitionSt.Slot(), 100, [32]byte{'a'})
_, err = vs.getExecutionPayload(context.Background(), nonTransitionSt.Slot(), 100, [32]byte{'a'}, nonTransitionSt)
require.NoError(t, err)
}

@@ -205,7 +205,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
BeaconDB: beaconDB,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
gotPayload, err := vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{})
gotPayload, err := vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
require.NoError(t, err)
require.NotNil(t, gotPayload)

@@ -217,7 +217,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
payload.FeeRecipient = evilRecipientAddress[:]
vs.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()

gotPayload, err = vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{})
gotPayload, err = vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
require.NoError(t, err)
require.NotNil(t, gotPayload)

@@ -19,14 +19,19 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
coretime "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
dbutil "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
mockp2p "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
mockSync "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync/initial-sync/testing"
@@ -51,6 +56,449 @@ import (
"google.golang.org/protobuf/proto"
)

func TestServer_GetBeaconBlock_Phase0(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := b.NewGenesisBlock(stateRoot[:])
|
||||
genBlk := &ethpb.SignedBeaconBlock{
|
||||
Block: &ethpb.BeaconBlock{
|
||||
Slot: genesis.Block.Slot,
|
||||
ParentRoot: genesis.Block.ParentRoot,
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: &ethpb.BeaconBlockBody{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
util.SaveBlock(t, ctx, db, genBlk)
|
||||
|
||||
parentRoot, err := genBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
proposerServer := getProposerServer(db, beaconState, parentRoot[:])
|
||||
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
req := &ethpb.BlockRequest{
|
||||
Slot: 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
proposerSlashings, attSlashings := injectSlashings(t, beaconState, privKeys, proposerServer)
|
||||
|
||||
block, err := proposerServer.GetBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
phase0Blk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Phase0)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, req.Slot, phase0Blk.Phase0.Slot)
|
||||
assert.DeepEqual(t, parentRoot[:], phase0Blk.Phase0.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, phase0Blk.Phase0.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, phase0Blk.Phase0.Body.Graffiti, "Expected block to have correct Graffiti")
|
||||
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(phase0Blk.Phase0.Body.ProposerSlashings)))
|
||||
assert.DeepEqual(t, proposerSlashings, phase0Blk.Phase0.Body.ProposerSlashings)
|
||||
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(phase0Blk.Phase0.Body.AttesterSlashings)))
|
||||
assert.DeepEqual(t, attSlashings, phase0Blk.Phase0.Body.AttesterSlashings)
|
||||
}
|
||||
|
||||
func TestServer_GetBeaconBlock_Altair(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := b.NewGenesisBlock(stateRoot[:])
|
||||
util.SaveBlock(t, ctx, db, genesis)
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
altairSlot, err := slots.EpochStart(params.BeaconConfig().AltairForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
|
||||
genAltair := &ethpb.SignedBeaconBlockAltair{
|
||||
Block: &ethpb.BeaconBlockAltair{
|
||||
Slot: altairSlot + 1,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: &ethpb.BeaconBlockBodyAltair{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
|
||||
blkRoot, err := genAltair.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")
|
||||
|
||||
proposerServer := getProposerServer(db, beaconState, parentRoot[:])
|
||||
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
require.NoError(t, err)
|
||||
req := &ethpb.BlockRequest{
|
||||
Slot: altairSlot + 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
proposerSlashings, attSlashings := injectSlashings(t, beaconState, privKeys, proposerServer)
|
||||
|
||||
block, err := proposerServer.GetBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
altairBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Altair)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
assert.Equal(t, req.Slot, altairBlk.Altair.Slot)
|
||||
assert.DeepEqual(t, parentRoot[:], altairBlk.Altair.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, altairBlk.Altair.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, altairBlk.Altair.Body.Graffiti, "Expected block to have correct Graffiti")
|
||||
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(altairBlk.Altair.Body.ProposerSlashings)))
|
||||
assert.DeepEqual(t, proposerSlashings, altairBlk.Altair.Body.ProposerSlashings)
|
||||
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(altairBlk.Altair.Body.AttesterSlashings)))
|
||||
assert.DeepEqual(t, attSlashings, altairBlk.Altair.Body.AttesterSlashings)
|
||||
}
|
||||
|
||||
func TestServer_GetBeaconBlock_Bellatrix(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
terminalBlockHash := bytesutil.PadTo([]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 32)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.AltairForkEpoch = 1
|
||||
cfg.TerminalBlockHash = common.BytesToHash(terminalBlockHash)
|
||||
cfg.TerminalBlockHashActivationEpoch = 2
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := b.NewGenesisBlock(stateRoot[:])
|
||||
util.SaveBlock(t, ctx, db, genesis)
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
|
||||
Block: &ethpb.BeaconBlockBellatrix{
|
||||
Slot: bellatrixSlot + 1,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
|
||||
ExecutionPayload: &enginev1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
|
||||
blkRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")
|
||||
|
||||
c := mockExecution.New()
|
||||
c.HashesByHeight[0] = terminalBlockHash
|
||||
random, err := helpers.RandaoMix(beaconState, slots.ToEpoch(beaconState.Slot()))
|
||||
require.NoError(t, err)
|
||||
timeStamp, err := slots.ToTime(beaconState.GenesisTime(), bellatrixSlot+1)
|
||||
require.NoError(t, err)
|
||||
|
||||
payload := &enginev1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: random,
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
ExtraData: make([]byte, 0),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 2,
|
||||
GasUsed: 3,
|
||||
Timestamp: uint64(timeStamp.Unix()),
|
||||
}
|
||||
|
||||
proposerServer := getProposerServer(db, beaconState, parentRoot[:])
|
||||
proposerServer.Eth1BlockFetcher = c
|
||||
proposerServer.ExecutionEngineCaller = &mockExecution.EngineClient{
|
||||
PayloadIDBytes: &enginev1.PayloadIDBytes{1},
|
||||
ExecutionPayload: payload,
|
||||
}
|
||||
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
require.NoError(t, err)
|
||||
req := &ethpb.BlockRequest{
|
||||
Slot: bellatrixSlot + 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
|
||||
block, err := proposerServer.GetBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
bellatrixBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Bellatrix)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
assert.Equal(t, req.Slot, bellatrixBlk.Bellatrix.Slot)
|
||||
assert.DeepEqual(t, parentRoot[:], bellatrixBlk.Bellatrix.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, bellatrixBlk.Bellatrix.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, bellatrixBlk.Bellatrix.Body.Graffiti, "Expected block to have correct Graffiti")
|
||||
|
||||
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
|
||||
require.DeepEqual(t, payload, bellatrixBlk.Bellatrix.Body.ExecutionPayload) // Payload should equal.
|
||||
|
||||
// Operator sets default fee recipient to not be burned through beacon node cli.
|
||||
newHook := logTest.NewGlobal()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg = params.MinimalSpecConfig().Copy()
|
||||
cfg.DefaultFeeRecipient = common.Address{'b'}
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
_, err = proposerServer.GetBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsDoNotContain(t, newHook, "Fee recipient is currently using the burn address")
|
||||
}
|
||||
|
||||
func TestServer_GetBeaconBlock_Capella(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
transition.SkipSlotCache.Disable()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 3
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := b.NewGenesisBlock(stateRoot[:])
|
||||
util.SaveBlock(t, ctx, db, genesis)
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
|
||||
blk := &ethpb.SignedBeaconBlockCapella{
|
||||
Block: &ethpb.BeaconBlockCapella{
|
||||
Slot: capellaSlot + 1,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: &ethpb.BeaconBlockBodyCapella{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadCapella{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
|
||||
blkRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")
|
||||
|
||||
random, err := helpers.RandaoMix(beaconState, slots.ToEpoch(beaconState.Slot()))
|
||||
require.NoError(t, err)
|
||||
timeStamp, err := slots.ToTime(beaconState.GenesisTime(), capellaSlot+1)
|
||||
require.NoError(t, err)
|
||||
payload := &enginev1.ExecutionPayloadCapella{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: random,
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
ExtraData: make([]byte, 0),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 2,
|
||||
GasUsed: 3,
|
||||
Timestamp: uint64(timeStamp.Unix()),
|
||||
}
|
||||
|
||||
proposerServer := getProposerServer(db, beaconState, parentRoot[:])
|
||||
proposerServer.ExecutionEngineCaller = &mockExecution.EngineClient{
|
||||
PayloadIDBytes: &enginev1.PayloadIDBytes{1},
|
||||
ExecutionPayloadCapella: payload,
|
||||
}
|
||||
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
require.NoError(t, err)
|
||||
req := &ethpb.BlockRequest{
|
||||
Slot: capellaSlot + 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
|
||||
copiedState := beaconState.Copy()
|
||||
copiedState, err = transition.ProcessSlots(ctx, copiedState, capellaSlot+1)
|
||||
require.NoError(t, err)
|
||||
change, err := util.GenerateBLSToExecutionChange(copiedState, privKeys[1], 0)
|
||||
require.NoError(t, err)
|
||||
proposerServer.BLSChangesPool.InsertBLSToExecChange(change)
|
||||
|
||||
got, err := proposerServer.GetBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(got.GetCapella().Body.BlsToExecutionChanges))
|
||||
require.DeepEqual(t, change, got.GetCapella().Body.BlsToExecutionChanges[0])
|
||||
}
|
||||
|
||||
func TestServer_GetBeaconBlock_Optimistic(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
proposerServer := &Server{
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
|
||||
SyncChecker: &mockSync.Sync{},
|
||||
TimeFetcher: &mock.ChainService{}}
|
||||
req := &ethpb.BlockRequest{
|
||||
Slot: bellatrixSlot + 1,
|
||||
}
|
||||
_, err = proposerServer.GetBeaconBlock(context.Background(), req)
|
||||
s, ok := status.FromError(err)
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, codes.Unavailable, s.Code())
|
||||
require.ErrorContains(t, errOptimisticMode.Error(), err)
|
||||
}
|
||||
|
||||
func getProposerServer(db db.HeadAccessDatabase, headState state.BeaconState, headRoot []byte) *Server {
|
||||
return &Server{
|
||||
HeadFetcher: &mock.ChainService{State: headState, Root: headRoot},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &mock.ChainService{},
|
||||
HeadUpdater: &mock.ChainService{},
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
OptimisticModeFetcher: &mock.ChainService{},
|
||||
TimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
|
||||
BeaconDB: db,
|
||||
BLSChangesPool: blstoexec.NewPool(),
|
||||
}
|
||||
}
|
||||
|
||||
func injectSlashings(t *testing.T, st state.BeaconState, keys []bls.SecretKey, server *Server) ([]*ethpb.ProposerSlashing, []*ethpb.AttesterSlashing) {
|
||||
proposerSlashings := make([]*ethpb.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
|
||||
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
|
||||
proposerSlashing, err := util.GenerateProposerSlashingForValidator(st, keys[i], i /* validator index */)
|
||||
require.NoError(t, err)
|
||||
proposerSlashings[i] = proposerSlashing
|
||||
err = server.SlashingsPool.InsertProposerSlashing(context.Background(), st, proposerSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
attSlashings := make([]*ethpb.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
|
||||
for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
|
||||
attesterSlashing, err := util.GenerateAttesterSlashingForValidator(st, keys[i+params.BeaconConfig().MaxProposerSlashings], types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings) /* validator index */)
|
||||
require.NoError(t, err)
|
||||
attSlashings[i] = attesterSlashing
|
||||
err = server.SlashingsPool.InsertAttesterSlashing(context.Background(), st, attesterSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
return proposerSlashings, attSlashings
|
||||
}
|
||||
|
||||
func TestProposer_ProposeBlock_OK(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -1914,382 +2362,6 @@ func TestProposer_DeleteAttsInPool_Aggregated(t *testing.T) {
|
||||
assert.Equal(t, 0, len(atts), "Did not delete unaggregated attestation")
|
||||
}
|
||||
|
||||
func TestProposer_GetBeaconBlock_PreForkEpoch(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := b.NewGenesisBlock(stateRoot[:])
|
||||
genBlk := &ethpb.SignedBeaconBlock{
|
||||
Block: &ethpb.BeaconBlock{
|
||||
Slot: genesis.Block.Slot,
|
||||
ParentRoot: genesis.Block.ParentRoot,
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: &ethpb.BeaconBlockBody{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
util.SaveBlock(t, ctx, db, genBlk)
|
||||
|
||||
parentRoot, err := genBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
proposerServer := &Server{
|
||||
HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:]},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &mock.ChainService{},
|
||||
HeadUpdater: &mock.ChainService{},
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
}
|
||||
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
req := &ethpb.BlockRequest{
|
||||
Slot: 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
|
||||
proposerSlashings := make([]*ethpb.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
|
||||
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
|
||||
proposerSlashing, err := util.GenerateProposerSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i],
|
||||
i, /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
proposerSlashings[i] = proposerSlashing
|
||||
err = proposerServer.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
attSlashings := make([]*ethpb.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
|
||||
for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
|
||||
attesterSlashing, err := util.GenerateAttesterSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i+params.BeaconConfig().MaxProposerSlashings],
|
||||
types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings), /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
attSlashings[i] = attesterSlashing
|
||||
err = proposerServer.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
block, err := proposerServer.GetBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
phase0Blk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Phase0)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
assert.Equal(t, req.Slot, phase0Blk.Phase0.Slot)
|
||||
assert.DeepEqual(t, parentRoot[:], phase0Blk.Phase0.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, phase0Blk.Phase0.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, phase0Blk.Phase0.Body.Graffiti, "Expected block to have correct Graffiti")
|
||||
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(phase0Blk.Phase0.Body.ProposerSlashings)))
|
||||
assert.DeepEqual(t, proposerSlashings, phase0Blk.Phase0.Body.ProposerSlashings)
|
||||
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(phase0Blk.Phase0.Body.AttesterSlashings)))
|
||||
assert.DeepEqual(t, attSlashings, phase0Blk.Phase0.Body.AttesterSlashings)
|
||||
}
|
||||
|
||||
func TestProposer_GetBeaconBlock_PostForkEpoch(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := b.NewGenesisBlock(stateRoot[:])
|
||||
util.SaveBlock(t, ctx, db, genesis)
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
altairSlot, err := slots.EpochStart(params.BeaconConfig().AltairForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
|
||||
genAltair := &ethpb.SignedBeaconBlockAltair{
|
||||
Block: &ethpb.BeaconBlockAltair{
|
||||
Slot: altairSlot + 1,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: &ethpb.BeaconBlockBodyAltair{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
|
||||
blkRoot, err := genAltair.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")
|
||||
|
||||
proposerServer := &Server{
|
||||
HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:]},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &mock.ChainService{},
|
||||
HeadUpdater: &mock.ChainService{},
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
}
|
||||
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
require.NoError(t, err)
|
||||
req := &ethpb.BlockRequest{
|
||||
Slot: altairSlot + 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
|
||||
proposerSlashings := make([]*ethpb.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
|
||||
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
|
||||
proposerSlashing, err := util.GenerateProposerSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i],
|
||||
i, /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
proposerSlashings[i] = proposerSlashing
|
||||
err = proposerServer.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
attSlashings := make([]*ethpb.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
|
||||
for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
|
||||
attesterSlashing, err := util.GenerateAttesterSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i+params.BeaconConfig().MaxProposerSlashings],
|
||||
types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings), /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
attSlashings[i] = attesterSlashing
|
||||
err = proposerServer.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
block, err := proposerServer.GetBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
altairBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Altair)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
assert.Equal(t, req.Slot, altairBlk.Altair.Slot)
|
||||
assert.DeepEqual(t, parentRoot[:], altairBlk.Altair.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, altairBlk.Altair.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, altairBlk.Altair.Body.Graffiti, "Expected block to have correct Graffiti")
|
||||
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(altairBlk.Altair.Body.ProposerSlashings)))
|
||||
assert.DeepEqual(t, proposerSlashings, altairBlk.Altair.Body.ProposerSlashings)
|
||||
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(altairBlk.Altair.Body.AttesterSlashings)))
|
||||
assert.DeepEqual(t, attSlashings, altairBlk.Altair.Body.AttesterSlashings)
|
||||
}
|
||||
|
||||
func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
terminalBlockHash := bytesutil.PadTo([]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 32)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.AltairForkEpoch = 1
|
||||
cfg.TerminalBlockHash = common.BytesToHash(terminalBlockHash)
|
||||
cfg.TerminalBlockHashActivationEpoch = 2
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := b.NewGenesisBlock(stateRoot[:])
|
||||
util.SaveBlock(t, ctx, db, genesis)
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
|
||||
Block: &ethpb.BeaconBlockBellatrix{
|
||||
Slot: bellatrixSlot + 1,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)},
|
||||
ExecutionPayload: &enginev1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
|
||||
blkRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")
|
||||
|
||||
c := mockExecution.New()
|
||||
c.HashesByHeight[0] = terminalBlockHash
|
||||
random, err := helpers.RandaoMix(beaconState, slots.ToEpoch(beaconState.Slot()))
|
||||
require.NoError(t, err)
|
||||
timeStamp, err := slots.ToTime(beaconState.GenesisTime(), bellatrixSlot+1)
|
||||
require.NoError(t, err)
|
||||
|
||||
payload := &enginev1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: random,
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
ExtraData: make([]byte, 0),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 2,
|
||||
GasUsed: 3,
|
||||
Timestamp: uint64(timeStamp.Unix()),
|
||||
}
|
||||
proposerServer := &Server{
|
||||
HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:], Optimistic: false},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &mock.ChainService{},
|
||||
HeadUpdater: &mock.ChainService{},
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: c,
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
ExecutionEngineCaller: &mockExecution.EngineClient{
|
||||
PayloadIDBytes: &enginev1.PayloadIDBytes{1},
|
||||
ExecutionPayload: payload,
|
||||
},
|
||||
BeaconDB: db,
|
||||
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
|
||||
}
|
||||
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
require.NoError(t, err)
|
||||
req := &ethpb.BlockRequest{
|
||||
Slot: bellatrixSlot + 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
|
||||
block, err := proposerServer.GetBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
bellatrixBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Bellatrix)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
assert.Equal(t, req.Slot, bellatrixBlk.Bellatrix.Slot)
|
||||
assert.DeepEqual(t, parentRoot[:], bellatrixBlk.Bellatrix.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, bellatrixBlk.Bellatrix.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, bellatrixBlk.Bellatrix.Body.Graffiti, "Expected block to have correct Graffiti")
|
||||
|
||||
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
|
||||
require.DeepEqual(t, payload, bellatrixBlk.Bellatrix.Body.ExecutionPayload) // Payload should equal.
|
||||
|
||||
// Operator sets default fee recipient to not be burned through beacon node cli.
|
||||
newHook := logTest.NewGlobal()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg = params.MinimalSpecConfig().Copy()
|
||||
cfg.DefaultFeeRecipient = common.Address{'b'}
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
_, err = proposerServer.GetBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsDoNotContain(t, newHook, "Fee recipient is currently using the burn address")
|
||||
}
|
||||
|
||||
func TestProposer_GetBeaconBlock_Optimistic(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
proposerServer := &Server{OptimisticModeFetcher: &mock.ChainService{Optimistic: true}, TimeFetcher: &mock.ChainService{}}
|
||||
req := &ethpb.BlockRequest{
|
||||
Slot: bellatrixSlot + 1,
|
||||
}
|
||||
_, err = proposerServer.GetBeaconBlock(context.Background(), req)
|
||||
s, ok := status.FromError(err)
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, codes.Unavailable, s.Code())
|
||||
require.ErrorContains(t, errOptimisticMode.Error(), err)
|
||||
}
|
||||
|
||||
func TestProposer_GetSyncAggregate_OK(t *testing.T) {
|
||||
proposerServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
|
||||
@@ -63,7 +63,6 @@ type Server struct {
|
||||
SlashingsPool slashings.PoolManager
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
BLSChangesPool blstoexec.PoolManager
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
MockEth1Votes bool
|
||||
Eth1BlockFetcher execution.POWBlockFetcher
|
||||
@@ -74,6 +73,7 @@ type Server struct {
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
ExecutionEngineCaller execution.EngineCaller
|
||||
BlockBuilder builder.BlockBuilder
|
||||
BLSChangesPool blstoexec.PoolManager
|
||||
}
|
||||
|
||||
// WaitForActivation checks if a validator public key exists in the active validator registry of the current
|
||||
|
||||
@@ -14,6 +14,7 @@ go_library(
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -113,13 +114,29 @@ func (p *StateProvider) State(ctx context.Context, stateId []byte) (state.Beacon
		}
	case "finalized":
		checkpoint := p.ChainInfoFetcher.FinalizedCheckpt()
		s, err = p.StateGenService.StateByRoot(ctx, bytesutil.ToBytes32(checkpoint.Root))
		targetSlot, err := slots.EpochStart(checkpoint.Epoch)
		if err != nil {
			return nil, errors.Wrap(err, "could not get start slot")
		}
		// We use the stategen replayer to fetch the finalized state and then
		// replay it to the start slot of our checkpoint's epoch. The replayer
		// only ever accesses our canonical history, so the state retrieved will
		// always be the finalized state at that epoch.
		s, err = p.ReplayerBuilder.ReplayerForSlot(targetSlot).ReplayToSlot(ctx, targetSlot)
		if err != nil {
			return nil, errors.Wrap(err, "could not get finalized state")
		}
	case "justified":
		checkpoint := p.ChainInfoFetcher.CurrentJustifiedCheckpt()
		s, err = p.StateGenService.StateByRoot(ctx, bytesutil.ToBytes32(checkpoint.Root))
		targetSlot, err := slots.EpochStart(checkpoint.Epoch)
		if err != nil {
			return nil, errors.Wrap(err, "could not get start slot")
		}
		// We use the stategen replayer to fetch the justified state and then
		// replay it to the start slot of our checkpoint's epoch. The replayer
		// only ever accesses our canonical history, so the state retrieved will
		// always be the justified state at that epoch.
		s, err = p.ReplayerBuilder.ReplayerForSlot(targetSlot).ReplayToSlot(ctx, targetSlot)
		if err != nil {
			return nil, errors.Wrap(err, "could not get justified state")
		}
@@ -203,9 +220,6 @@ func (p *StateProvider) StateBySlot(ctx context.Context, target types.Slot) (sta
	if target > p.GenesisTimeFetcher.CurrentSlot() {
		return nil, errors.New("requested slot is in the future")
	}
	if target > p.ChainInfoFetcher.HeadSlot() {
		return nil, errors.New("requested slot number is higher than head slot number")
	}

	st, err := p.ReplayerBuilder.ReplayerForSlot(target).ReplayBlocks(ctx)
	if err != nil {
@@ -94,15 +94,19 @@ func TestGetState(t *testing.T) {
|
||||
|
||||
t.Run("finalized", func(t *testing.T) {
|
||||
stateGen := mockstategen.NewMockService()
|
||||
replayer := mockstategen.NewMockReplayerBuilder()
|
||||
replayer.SetMockStateForSlot(newBeaconState, params.BeaconConfig().SlotsPerEpoch*10)
|
||||
stateGen.StatesByRoot[stateRoot] = newBeaconState
|
||||
|
||||
p := StateProvider{
|
||||
ChainInfoFetcher: &chainMock.ChainService{
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Root: stateRoot[:],
|
||||
Root: stateRoot[:],
|
||||
Epoch: 10,
|
||||
},
|
||||
},
|
||||
StateGenService: stateGen,
|
||||
ReplayerBuilder: replayer,
|
||||
}
|
||||
|
||||
s, err := p.State(ctx, []byte("finalized"))
|
||||
@@ -114,15 +118,19 @@ func TestGetState(t *testing.T) {
|
||||
|
||||
t.Run("justified", func(t *testing.T) {
|
||||
stateGen := mockstategen.NewMockService()
|
||||
replayer := mockstategen.NewMockReplayerBuilder()
|
||||
replayer.SetMockStateForSlot(newBeaconState, params.BeaconConfig().SlotsPerEpoch*10)
|
||||
stateGen.StatesByRoot[stateRoot] = newBeaconState
|
||||
|
||||
p := StateProvider{
|
||||
ChainInfoFetcher: &chainMock.ChainService{
|
||||
CurrentJustifiedCheckPoint: ðpb.Checkpoint{
|
||||
Root: stateRoot[:],
|
||||
Root: stateRoot[:],
|
||||
Epoch: 10,
|
||||
},
|
||||
},
|
||||
StateGenService: stateGen,
|
||||
ReplayerBuilder: replayer,
|
||||
}
|
||||
|
||||
s, err := p.State(ctx, []byte("justified"))
|
||||
@@ -387,11 +395,16 @@ func TestStateBySlot_FutureSlot(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStateBySlot_AfterHeadSlot(t *testing.T) {
|
||||
st, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 100})
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 100})
|
||||
require.NoError(t, err)
|
||||
slotSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 101})
|
||||
require.NoError(t, err)
|
||||
currentSlot := types.Slot(102)
|
||||
mock := &chainMock.ChainService{State: st, Slot: &currentSlot}
|
||||
p := StateProvider{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
_, err = p.StateBySlot(context.Background(), 101)
|
||||
assert.ErrorContains(t, "requested slot number is higher than head slot number", err)
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: &currentSlot}
|
||||
mockReplayer := mockstategen.NewMockReplayerBuilder()
|
||||
mockReplayer.SetMockStateForSlot(slotSt, 101)
|
||||
p := StateProvider{ChainInfoFetcher: mock, GenesisTimeFetcher: mock, ReplayerBuilder: mockReplayer}
|
||||
st, err := p.StateBySlot(context.Background(), 101)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, types.Slot(101), st.Slot())
|
||||
}
|
||||
|
||||
@@ -18,11 +18,9 @@ import (
|
||||
type BeaconState interface {
|
||||
SpecParametersProvider
|
||||
ReadOnlyBeaconState
|
||||
ReadOnlyWithdrawals
|
||||
WriteOnlyBeaconState
|
||||
Copy() BeaconState
|
||||
HashTreeRoot(ctx context.Context) ([32]byte, error)
|
||||
FutureForkStub
|
||||
StateProver
|
||||
}
|
||||
|
||||
@@ -50,6 +48,10 @@ type ReadOnlyBeaconState interface {
|
||||
ReadOnlyBalances
|
||||
ReadOnlyCheckpoint
|
||||
ReadOnlyAttestations
|
||||
ReadOnlyWithdrawals
|
||||
ReadOnlyParticipation
|
||||
ReadOnlyInactivity
|
||||
ReadOnlySyncCommittee
|
||||
ToProtoUnsafe() interface{}
|
||||
ToProto() interface{}
|
||||
GenesisTime() uint64
|
||||
@@ -57,8 +59,8 @@ type ReadOnlyBeaconState interface {
|
||||
Slot() types.Slot
|
||||
Fork() *ethpb.Fork
|
||||
LatestBlockHeader() *ethpb.BeaconBlockHeader
|
||||
HistoricalRoots() [][]byte
|
||||
HistoricalSummaries() []*ethpb.HistoricalSummary
|
||||
HistoricalRoots() ([][]byte, error)
|
||||
HistoricalSummaries() ([]*ethpb.HistoricalSummary, error)
|
||||
Slashings() []uint64
|
||||
FieldReferencesCount() map[string]uint64
|
||||
MarshalSSZ() ([]byte, error)
|
||||
@@ -77,6 +79,9 @@ type WriteOnlyBeaconState interface {
|
||||
WriteOnlyBalances
|
||||
WriteOnlyCheckpoint
|
||||
WriteOnlyAttestations
|
||||
WriteOnlyParticipation
|
||||
WriteOnlyInactivity
|
||||
WriteOnlySyncCommittee
|
||||
SetGenesisTime(val uint64) error
|
||||
SetGenesisValidatorsRoot(val []byte) error
|
||||
SetSlot(val types.Slot) error
|
||||
@@ -86,7 +91,7 @@ type WriteOnlyBeaconState interface {
|
||||
SetSlashings(val []uint64) error
|
||||
UpdateSlashingsAtIndex(idx, val uint64) error
|
||||
AppendHistoricalRoots(root [32]byte) error
|
||||
AppendHistoricalSummariesUpdate(*ethpb.HistoricalSummary) error
|
||||
AppendHistoricalSummaries(*ethpb.HistoricalSummary) error
|
||||
SetLatestExecutionPayloadHeader(payload interfaces.ExecutionData) error
|
||||
SetNextWithdrawalIndex(i uint64) error
|
||||
SetNextWithdrawalValidatorIndex(i types.ValidatorIndex) error
|
||||
@@ -135,6 +140,7 @@ type ReadOnlyCheckpoint interface {
|
||||
FinalizedCheckpoint() *ethpb.Checkpoint
|
||||
FinalizedCheckpointEpoch() types.Epoch
|
||||
JustificationBits() bitfield.Bitvector4
|
||||
UnrealizedCheckpointBalances() (uint64, uint64, uint64, error)
|
||||
}
|
||||
|
||||
// ReadOnlyBlockRoots defines a struct which only has read access to block roots methods.
|
||||
@@ -176,6 +182,23 @@ type ReadOnlyWithdrawals interface {
|
||||
NextWithdrawalIndex() (uint64, error)
|
||||
}
|
||||
|
||||
// ReadOnlyParticipation defines a struct which only has read access to participation methods.
|
||||
type ReadOnlyParticipation interface {
|
||||
CurrentEpochParticipation() ([]byte, error)
|
||||
PreviousEpochParticipation() ([]byte, error)
|
||||
}
|
||||
|
||||
// ReadOnlyInactivity defines a struct which only has read access to inactivity methods.
|
||||
type ReadOnlyInactivity interface {
|
||||
InactivityScores() ([]uint64, error)
|
||||
}
|
||||
|
||||
// ReadOnlySyncCommittee defines a struct which only has read access to sync committee methods.
|
||||
type ReadOnlySyncCommittee interface {
|
||||
CurrentSyncCommittee() (*ethpb.SyncCommittee, error)
|
||||
NextSyncCommittee() (*ethpb.SyncCommittee, error)
|
||||
}
|
||||
|
||||
// WriteOnlyBlockRoots defines a struct which only has write access to block roots methods.
|
||||
type WriteOnlyBlockRoots interface {
|
||||
SetBlockRoots(val [][]byte) error
|
||||
@@ -234,23 +257,24 @@ type WriteOnlyAttestations interface {
|
||||
RotateAttestations() error
|
||||
}
|
||||
|
||||
// FutureForkStub defines methods that are used for future forks. This is a low cost solution to enable
|
||||
// various state casting of interface to work.
|
||||
type FutureForkStub interface {
|
||||
// WriteOnlyParticipation defines a struct which only has write access to participation methods.
|
||||
type WriteOnlyParticipation interface {
|
||||
AppendCurrentParticipationBits(val byte) error
|
||||
AppendPreviousParticipationBits(val byte) error
|
||||
AppendInactivityScore(s uint64) error
|
||||
CurrentEpochParticipation() ([]byte, error)
|
||||
PreviousEpochParticipation() ([]byte, error)
|
||||
UnrealizedCheckpointBalances() (uint64, uint64, uint64, error)
|
||||
InactivityScores() ([]uint64, error)
|
||||
SetInactivityScores(val []uint64) error
|
||||
CurrentSyncCommittee() (*ethpb.SyncCommittee, error)
|
||||
SetCurrentSyncCommittee(val *ethpb.SyncCommittee) error
|
||||
SetPreviousParticipationBits(val []byte) error
|
||||
SetCurrentParticipationBits(val []byte) error
|
||||
ModifyCurrentParticipationBits(func(val []byte) ([]byte, error)) error
|
||||
ModifyPreviousParticipationBits(func(val []byte) ([]byte, error)) error
|
||||
NextSyncCommittee() (*ethpb.SyncCommittee, error)
|
||||
}
|
||||
|
||||
// WriteOnlyInactivity defines a struct which only has write access to inactivity methods.
|
||||
type WriteOnlyInactivity interface {
|
||||
AppendInactivityScore(s uint64) error
|
||||
SetInactivityScores(val []uint64) error
|
||||
}
|
||||
|
||||
// WriteOnlySyncCommittee defines a struct which only has write access to sync committee methods.
|
||||
type WriteOnlySyncCommittee interface {
|
||||
SetCurrentSyncCommittee(val *ethpb.SyncCommittee) error
|
||||
SetNextSyncCommittee(val *ethpb.SyncCommittee) error
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package state_native
|
||||
import (
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/version"
|
||||
)
|
||||
|
||||
// GenesisTime of the beacon state as a uint64.
|
||||
@@ -67,15 +68,15 @@ func (b *BeaconState) forkVal() *ethpb.Fork {
|
||||
}
|
||||
|
||||
// HistoricalRoots based on epochs stored in the beacon state.
|
||||
func (b *BeaconState) HistoricalRoots() [][]byte {
|
||||
func (b *BeaconState) HistoricalRoots() ([][]byte, error) {
|
||||
if b.historicalRoots == nil {
|
||||
return nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
return b.historicalRoots.Slice()
|
||||
return b.historicalRoots.Slice(), nil
|
||||
}
|
||||
|
||||
// balancesLength returns the length of the balances slice.
|
||||
@@ -89,15 +90,19 @@ func (b *BeaconState) balancesLength() int {
|
||||
}
|
||||
|
||||
// HistoricalSummaries of the beacon state.
|
||||
func (b *BeaconState) HistoricalSummaries() []*ethpb.HistoricalSummary {
|
||||
func (b *BeaconState) HistoricalSummaries() ([]*ethpb.HistoricalSummary, error) {
|
||||
if b.version < version.Capella {
|
||||
return nil, errNotSupported("HistoricalSummaries", b.version)
|
||||
}
|
||||
|
||||
if b.historicalSummaries == nil {
|
||||
return nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
return b.historicalSummariesVal()
|
||||
return b.historicalSummariesVal(), nil
|
||||
}
|
||||
|
||||
// historicalSummariesVal of the beacon state.
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
nativetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native/types"
|
||||
@@ -14,7 +12,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/version"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -260,7 +257,7 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
|
||||
fieldRoots[nativetypes.NextWithdrawalValidatorIndex.RealPosition()] = nextWithdrawalValidatorIndexRoot
|
||||
|
||||
// Historical summary root.
|
||||
historicalSummaryRoot, err := historicalSummaryRoot(state.historicalSummaries)
|
||||
historicalSummaryRoot, err := stateutil.HistoricalSummariesRoot(state.historicalSummaries)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute historical summary merkleization")
|
||||
}
|
||||
@@ -269,39 +266,3 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
|
||||
|
||||
return fieldRoots, nil
|
||||
}
|
||||
|
||||
func historicalSummaryRoot(summaries []*ethpb.HistoricalSummary) ([32]byte, error) {
|
||||
max := uint64(fieldparams.HistoricalRootsLength)
|
||||
if uint64(len(summaries)) > max {
|
||||
return [32]byte{}, fmt.Errorf("historical summary exceeds max length %d", max)
|
||||
}
|
||||
|
||||
hasher := hash.CustomSHA256Hasher()
|
||||
roots := make([][32]byte, len(summaries))
|
||||
for i := 0; i < len(summaries); i++ {
|
||||
r, err := summaries[i].HashTreeRoot()
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not merkleize historical summary")
|
||||
}
|
||||
roots[i] = r
|
||||
}
|
||||
|
||||
summariesRoot, err := ssz.BitwiseMerkleize(
|
||||
hasher,
|
||||
roots,
|
||||
uint64(len(roots)),
|
||||
fieldparams.HistoricalRootsLength,
|
||||
)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not compute epoch attestations merkleization")
|
||||
}
|
||||
summariesLenBuf := new(bytes.Buffer)
|
||||
if err := binary.Write(summariesLenBuf, binary.LittleEndian, uint64(len(summaries))); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not marshal historical summary length")
|
||||
}
|
||||
// We need to mix in the length of the slice.
|
||||
summariesLenRoot := make([]byte, 32)
|
||||
copy(summariesLenRoot, summariesLenBuf.Bytes())
|
||||
res := ssz.MixInLength(summariesRoot, summariesLenRoot)
|
||||
return res, nil
|
||||
}
|
||||
|
||||
@@ -258,6 +258,10 @@ func TestComputeFieldRootsWithHasher_Capella(t *testing.T) {
|
||||
require.NoError(t, beaconState.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
require.NoError(t, beaconState.SetNextWithdrawalIndex(123))
|
||||
require.NoError(t, beaconState.SetNextWithdrawalValidatorIndex(123))
|
||||
require.NoError(t, beaconState.AppendHistoricalSummaries(&ethpb.HistoricalSummary{
|
||||
BlockSummaryRoot: bytesutil.PadTo([]byte("block summary root"), 32),
|
||||
StateSummaryRoot: bytesutil.PadTo([]byte("state summary root"), 32),
|
||||
}))
|
||||
|
||||
nativeState, ok := beaconState.(*statenative.BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -298,6 +302,7 @@ func TestComputeFieldRootsWithHasher_Capella(t *testing.T) {
|
||||
{0x39, 0x29, 0x16, 0xe8, 0x5a, 0xd2, 0xb, 0xbb, 0x1f, 0xef, 0x6a, 0xe0, 0x2d, 0xa6, 0x6a, 0x46, 0x81, 0xba, 0xcf, 0x86, 0xfc, 0x16, 0x22, 0x2a, 0x9b, 0x72, 0x96, 0x71, 0x2b, 0xc7, 0x5b, 0x9d},
|
||||
{0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
|
||||
{0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
|
||||
{0xa1, 0x4, 0x64, 0x31, 0x2a, 0xa, 0x49, 0x31, 0x1c, 0x1, 0x41, 0x17, 0xc0, 0x52, 0x52, 0xfa, 0x4c, 0xf4, 0x95, 0x4f, 0x5c, 0xb0, 0x5a, 0x40, 0xc1, 0x32, 0x39, 0xc3, 0x7c, 0xb7, 0x2c, 0x27},
|
||||
}
|
||||
assert.DeepEqual(t, expected, root)
|
||||
}
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
nativetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native/types"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stateutil"
|
||||
@@ -115,7 +113,7 @@ func (b *BeaconState) AppendHistoricalRoots(root [32]byte) error {
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.version > version.Bellatrix {
|
||||
return fmt.Errorf("AppendHistoricalRoots is not supported for version %d", b.version)
|
||||
return errNotSupported("AppendHistoricalRoots", b.version)
|
||||
}
|
||||
|
||||
roots := b.historicalRoots
|
||||
@@ -131,14 +129,14 @@ func (b *BeaconState) AppendHistoricalRoots(root [32]byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppendHistoricalSummariesUpdate AppendHistoricalSummary for the beacon state. Appends the new value
|
||||
// AppendHistoricalSummaries for the beacon state. Appends the new value
|
||||
// to the end of list.
|
||||
func (b *BeaconState) AppendHistoricalSummariesUpdate(summary *ethpb.HistoricalSummary) error {
|
||||
func (b *BeaconState) AppendHistoricalSummaries(summary *ethpb.HistoricalSummary) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.version < version.Capella {
|
||||
return fmt.Errorf("AppendHistoricalSummariesUpdate is not supported for version %d", b.version)
|
||||
return errNotSupported("AppendHistoricalSummaries", b.version)
|
||||
}
|
||||
|
||||
summaries := b.historicalSummaries
|
||||
|
||||
@@ -515,6 +515,7 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
// Large arrays, increases over time.
|
||||
balances: b.balances,
|
||||
historicalRoots: b.historicalRoots,
|
||||
historicalSummaries: b.historicalSummaries,
|
||||
validators: b.validators,
|
||||
previousEpochParticipation: b.previousEpochParticipation,
|
||||
currentEpochParticipation: b.currentEpochParticipation,
|
||||
@@ -533,7 +534,6 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
nextSyncCommittee: b.nextSyncCommitteeVal(),
|
||||
latestExecutionPayloadHeader: b.latestExecutionPayloadHeaderVal(),
|
||||
latestExecutionPayloadHeaderCapella: b.latestExecutionPayloadHeaderCapellaVal(),
|
||||
historicalSummaries: b.historicalSummariesVal(),
|
||||
|
||||
dirtyFields: make(map[nativetypes.FieldIndex]bool, fieldCount),
|
||||
dirtyIndices: make(map[nativetypes.FieldIndex][]uint64, fieldCount),
|
||||
@@ -838,7 +838,7 @@ func (b *BeaconState) rootSelector(ctx context.Context, field nativetypes.FieldI
|
||||
case nativetypes.NextWithdrawalValidatorIndex:
|
||||
return ssz.Uint64Root(uint64(b.nextWithdrawalValidatorIndex)), nil
|
||||
case nativetypes.HistoricalSummaries:
|
||||
return historicalSummaryRoot(b.historicalSummaries)
|
||||
return stateutil.HistoricalSummariesRoot(b.historicalSummaries)
|
||||
}
|
||||
return [32]byte{}, errors.New("invalid field index provided")
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ go_library(
|
||||
"field_root_eth1.go",
|
||||
"field_root_validator.go",
|
||||
"field_root_vector.go",
|
||||
"historical_summaries_root.go",
|
||||
"participation_bit_root.go",
|
||||
"pending_attestation_root.go",
|
||||
"reference.go",
|
||||
|
||||
beacon-chain/state/stateutil/historical_summaries_root.go (new file, 49 lines)
@@ -0,0 +1,49 @@
package stateutil

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/pkg/errors"
	fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v3/crypto/hash"
	"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)

func HistoricalSummariesRoot(summaries []*ethpb.HistoricalSummary) ([32]byte, error) {
	max := uint64(fieldparams.HistoricalRootsLength)
	if uint64(len(summaries)) > max {
		return [32]byte{}, fmt.Errorf("historical summary exceeds max length %d", max)
	}

	hasher := hash.CustomSHA256Hasher()
	roots := make([][32]byte, len(summaries))
	for i := 0; i < len(summaries); i++ {
		r, err := summaries[i].HashTreeRoot()
		if err != nil {
			return [32]byte{}, errors.Wrap(err, "could not merkleize historical summary")
		}
		roots[i] = r
	}

	summariesRoot, err := ssz.BitwiseMerkleize(
		hasher,
		roots,
		uint64(len(roots)),
		fieldparams.HistoricalRootsLength,
	)
	if err != nil {
		return [32]byte{}, errors.Wrap(err, "could not compute historical summaries merkleization")
	}
	summariesLenBuf := new(bytes.Buffer)
	if err := binary.Write(summariesLenBuf, binary.LittleEndian, uint64(len(summaries))); err != nil {
		return [32]byte{}, errors.Wrap(err, "could not marshal historical summary length")
	}
	// We need to mix in the length of the slice.
	summariesLenRoot := make([]byte, 32)
	copy(summariesLenRoot, summariesLenBuf.Bytes())
	res := ssz.MixInLength(summariesRoot, summariesLenRoot)
	return res, nil
}
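As an aside (not part of the diff): the new stateutil helper above can be exercised in isolation. The sketch below is illustrative only; the summary values are placeholders, and it simply calls the HistoricalSummariesRoot function introduced in this change.

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stateutil"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)

func main() {
	// Placeholder summaries; in practice each root is the hash tree root of a
	// completed historical period's blocks or states.
	summaries := []*ethpb.HistoricalSummary{
		{BlockSummaryRoot: make([]byte, 32), StateSummaryRoot: make([]byte, 32)},
		{BlockSummaryRoot: make([]byte, 32), StateSummaryRoot: make([]byte, 32)},
	}
	root, err := stateutil.HistoricalSummariesRoot(summaries)
	if err != nil {
		panic(err)
	}
	fmt.Printf("historical summaries root: %#x\n", root[:])
}
```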
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"batch_verifier.go",
|
||||
"broadcast_bls_changes.go",
|
||||
"context.go",
|
||||
"deadlines.go",
|
||||
"decode_pubsub.go",
|
||||
@@ -133,6 +134,7 @@ go_test(
|
||||
size = "small",
|
||||
srcs = [
|
||||
"batch_verifier_test.go",
|
||||
"broadcast_bls_changes_test.go",
|
||||
"context_test.go",
|
||||
"decode_pubsub_test.go",
|
||||
"error_test.go",
|
||||
|
||||
beacon-chain/sync/broadcast_bls_changes.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package sync

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/time/slots"
)

// This routine broadcasts all known BLS changes at the Capella fork.
func (s *Service) broadcastBLSChanges(currSlot types.Slot) error {
	capellaSlotStart, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
	if err != nil {
		// only possible error is an overflow, so we exit early from the method
		return nil
	}
	if currSlot == capellaSlotStart {
		changes, err := s.cfg.blsToExecPool.PendingBLSToExecChanges()
		if err != nil {
			return errors.Wrap(err, "could not get BLS to execution changes")
		}
		for _, ch := range changes {
			if err := s.cfg.p2p.Broadcast(s.ctx, ch); err != nil {
				return errors.Wrap(err, "could not broadcast BLS to execution changes.")
			}
		}
	}
	return nil
}
beacon-chain/sync/broadcast_bls_changes_test.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package sync

import (
	"context"
	"testing"
	"time"

	mockChain "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
	mockp2p "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/testing"
	mockSync "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync/initial-sync/testing"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v3/testing/require"
	"github.com/prysmaticlabs/prysm/v3/time/slots"
)

func TestBroadcastBLSChanges(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	c := params.BeaconConfig()
	c.CapellaForkEpoch = c.BellatrixForkEpoch.Add(2)
	params.OverrideBeaconConfig(c)
	chainService := &mockChain.ChainService{
		Genesis:        time.Now(),
		ValidatorsRoot: [32]byte{'A'},
	}
	s := NewService(context.Background(),
		WithP2P(mockp2p.NewTestP2P(t)),
		WithInitialSync(&mockSync.Sync{IsSyncing: false}),
		WithChainService(chainService),
		WithStateNotifier(chainService.StateNotifier()),
		WithOperationNotifier(chainService.OperationNotifier()),
		WithBlsToExecPool(blstoexec.NewPool()),
	)
	var emptySig [96]byte
	s.cfg.blsToExecPool.InsertBLSToExecChange(&ethpb.SignedBLSToExecutionChange{
		Message: &ethpb.BLSToExecutionChange{
			ValidatorIndex:     10,
			FromBlsPubkey:      make([]byte, 48),
			ToExecutionAddress: make([]byte, 20),
		},
		Signature: emptySig[:],
	})

	capellaStart, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
	require.NoError(t, err)
	require.NoError(t, s.broadcastBLSChanges(capellaStart))
	require.NoError(t, s.broadcastBLSChanges(capellaStart+1))
}
@@ -28,6 +28,12 @@ func (s *Service) forkWatcher() {
				log.WithError(err).Error("Unable to check for fork in the previous epoch")
				continue
			}
			// Broadcast BLS changes at the Capella fork boundary
			if err := s.broadcastBLSChanges(currSlot); err != nil {
				log.WithError(err).Error("Unable to broadcast BLS to execution changes")
				continue
			}

		case <-s.ctx.Done():
			log.Debug("Context closed, exiting goroutine")
			slotTicker.Done()
@@ -18,6 +18,7 @@ import (
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v3/container/leaky-bucket"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/rand"
|
||||
"github.com/prysmaticlabs/prysm/v3/math"
|
||||
p2ppb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -77,7 +78,7 @@ type blocksFetcher struct {
|
||||
chain blockchainService
|
||||
p2p p2p.P2P
|
||||
db db.ReadOnlyDatabase
|
||||
blocksPerSecond uint64
|
||||
blocksPerPeriod uint64
|
||||
rateLimiter *leakybucket.Collector
|
||||
peerLocks map[peer.ID]*peerLock
|
||||
fetchRequests chan *fetchRequestParams
|
||||
@@ -112,11 +113,11 @@ type fetchRequestResponse struct {
|
||||
|
||||
// newBlocksFetcher creates ready to use fetcher.
|
||||
func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetcher {
|
||||
blocksPerSecond := flags.Get().BlockBatchLimit
|
||||
blocksPerPeriod := flags.Get().BlockBatchLimit
|
||||
allowedBlocksBurst := flags.Get().BlockBatchLimitBurstFactor * flags.Get().BlockBatchLimit
|
||||
// Allow fetcher to go almost to the full burst capacity (less a single batch).
|
||||
rateLimiter := leakybucket.NewCollector(
|
||||
float64(blocksPerSecond), int64(allowedBlocksBurst-blocksPerSecond),
|
||||
float64(blocksPerPeriod), int64(allowedBlocksBurst-blocksPerPeriod),
|
||||
blockLimiterPeriod, false /* deleteEmptyBuckets */)
|
||||
|
||||
capacityWeight := cfg.peerFilterCapacityWeight
|
||||
@@ -132,7 +133,7 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
|
||||
chain: cfg.chain,
|
||||
p2p: cfg.p2p,
|
||||
db: cfg.db,
|
||||
blocksPerSecond: uint64(blocksPerSecond),
|
||||
blocksPerPeriod: uint64(blocksPerPeriod),
|
||||
rateLimiter: rateLimiter,
|
||||
peerLocks: make(map[peer.ID]*peerLock),
|
||||
fetchRequests: make(chan *fetchRequestParams, maxPendingRequests),
|
||||
@@ -323,7 +324,7 @@ func (f *blocksFetcher) requestBlocks(
|
||||
"score": f.p2p.Peers().Scorers().BlockProviderScorer().FormatScorePretty(pid),
|
||||
}).Debug("Requesting blocks")
|
||||
if f.rateLimiter.Remaining(pid.String()) < int64(req.Count) {
|
||||
if err := f.waitForBandwidth(pid); err != nil {
|
||||
if err := f.waitForBandwidth(pid, req.Count); err != nil {
|
||||
l.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
@@ -351,7 +352,7 @@ func (f *blocksFetcher) requestBlocksByRoot(
|
||||
"score": f.p2p.Peers().Scorers().BlockProviderScorer().FormatScorePretty(pid),
|
||||
}).Debug("Requesting blocks (by roots)")
|
||||
if f.rateLimiter.Remaining(pid.String()) < int64(len(*req)) {
|
||||
if err := f.waitForBandwidth(pid); err != nil {
|
||||
if err := f.waitForBandwidth(pid, uint64(len(*req))); err != nil {
|
||||
l.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
@@ -363,9 +364,19 @@ func (f *blocksFetcher) requestBlocksByRoot(
|
||||
}
|
||||
|
||||
// waitForBandwidth blocks up until peer's bandwidth is restored.
|
||||
func (f *blocksFetcher) waitForBandwidth(pid peer.ID) error {
|
||||
func (f *blocksFetcher) waitForBandwidth(pid peer.ID, count uint64) error {
|
||||
log.WithField("peer", pid).Debug("Slowing down for rate limit")
|
||||
timer := time.NewTimer(f.rateLimiter.TillEmpty(pid.String()))
|
||||
rem := f.rateLimiter.Remaining(pid.String())
|
||||
if uint64(rem) >= count {
|
||||
// Exit early if we have sufficient capacity
|
||||
return nil
|
||||
}
|
||||
intCount, err := math.Int(count)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
toWait := timeToWait(int64(intCount), rem, f.rateLimiter.Capacity(), f.rateLimiter.TillEmpty(pid.String()))
|
||||
timer := time.NewTimer(toWait)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-f.ctx.Done():
|
||||
@@ -375,3 +386,18 @@ func (f *blocksFetcher) waitForBandwidth(pid peer.ID) error {
	}
	return nil
}

// Determine how long it will take for us to have the required number of blocks allowed by our rate limiter.
// We do this by calculating the duration till the rate limiter can request these blocks without exceeding
// the provided bandwidth limits per peer.
func timeToWait(wanted, rem, capacity int64, timeTillEmpty time.Duration) time.Duration {
	// Defensive check if we have more than enough blocks
	// to request from the peer.
	if rem >= wanted {
		return 0
	}
	blocksNeeded := wanted - rem
	currentNumBlks := capacity - rem
	expectedTime := int64(timeTillEmpty) * blocksNeeded / currentNumBlks
	return time.Duration(expectedTime)
}
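As an aside (not part of the diff): since timeToWait is unexported, the sketch below re-derives the same formula locally to make the arithmetic concrete; the inputs mirror the "Limiter has reached full capacity" case in the test added further down.

```go
package main

import (
	"fmt"
	"time"
)

// Local copy of the wait-time formula, for illustration only: scale the time
// until the bucket is empty by the fraction of blocks still missing.
func timeToWait(wanted, rem, capacity int64, timeTillEmpty time.Duration) time.Duration {
	if rem >= wanted {
		return 0
	}
	blocksNeeded := wanted - rem
	currentNumBlks := capacity - rem
	return time.Duration(int64(timeTillEmpty) * blocksNeeded / currentNumBlks)
}

func main() {
	// A drained limiter (rem=0) with capacity 640 that refills in 60s needs
	// one tenth of that time before 64 more blocks can be requested.
	fmt.Println(timeToWait(64, 0, 640, 60*time.Second)) // prints 6s
}
```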
@@ -110,7 +110,7 @@ func (f *blocksFetcher) filterPeers(ctx context.Context, peers []peer.ID, peersP
|
||||
remaining, capacity := float64(f.rateLimiter.Remaining(peerID.String())), float64(f.rateLimiter.Capacity())
|
||||
// When capacity is close to exhaustion, allow less performant peer to take a chance.
|
||||
// Otherwise, there's a good chance system will be forced to wait for rate limiter.
|
||||
if remaining < float64(f.blocksPerSecond) {
|
||||
if remaining < float64(f.blocksPerPeriod) {
|
||||
return 0.0
|
||||
}
|
||||
capScore := remaining / capacity
|
||||
|
||||
@@ -590,6 +590,42 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||
require.LogsContain(t, hook, fmt.Sprintf("msg=\"Slowing down for rate limit\" peer=%s", p2.PeerID()))
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
|
||||
p1 := p2pt.NewTestP2P(t)
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
require.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
req := &ethpb.BeaconBlocksByRangeRequest{
|
||||
StartSlot: 100,
|
||||
Step: 1,
|
||||
Count: 64,
|
||||
}
|
||||
|
||||
topic := p2pm.RPCBlocksByRangeTopicV1
|
||||
protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
|
||||
streamHandlerFn := func(stream network.Stream) {
|
||||
assert.NoError(t, stream.Close())
|
||||
}
|
||||
p2.BHost.SetStreamHandler(protocol, streamHandlerFn)
|
||||
|
||||
burstFactor := uint64(flags.Get().BlockBatchLimitBurstFactor)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1})
|
||||
fetcher.rateLimiter = leakybucket.NewCollector(float64(req.Count), int64(req.Count*burstFactor), 5*time.Second, false)
|
||||
fetcher.chain = &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
|
||||
start := time.Now()
|
||||
assert.NoError(t, fetcher.waitForBandwidth(p2.PeerID(), 10))
|
||||
dur := time.Since(start)
|
||||
assert.Equal(t, true, dur < time.Millisecond, "waited excessively for bandwidth")
|
||||
fetcher.rateLimiter.Add(p2.PeerID().String(), int64(req.Count*burstFactor))
|
||||
start = time.Now()
|
||||
assert.NoError(t, fetcher.waitForBandwidth(p2.PeerID(), req.Count))
|
||||
dur = time.Since(start)
|
||||
assert.Equal(t, float64(5), dur.Truncate(1*time.Second).Seconds(), "waited excessively for bandwidth")
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) {
|
||||
p1 := p2pt.NewTestP2P(t)
|
||||
tests := []struct {
|
||||
@@ -860,3 +896,46 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimeToWait(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
wanted int64
|
||||
rem int64
|
||||
capacity int64
|
||||
timeTillEmpty time.Duration
|
||||
want time.Duration
|
||||
}{
|
||||
{
|
||||
name: "Limiter has sufficient blocks",
|
||||
wanted: 64,
|
||||
rem: 64,
|
||||
capacity: 320,
|
||||
timeTillEmpty: 200 * time.Second,
|
||||
want: 0 * time.Second,
|
||||
},
|
||||
{
|
||||
name: "Limiter has reached full capacity",
|
||||
wanted: 64,
|
||||
rem: 0,
|
||||
capacity: 640,
|
||||
timeTillEmpty: 60 * time.Second,
|
||||
want: 6 * time.Second,
|
||||
},
|
||||
{
|
||||
name: "Requesting full capacity from peer",
|
||||
wanted: 640,
|
||||
rem: 0,
|
||||
capacity: 640,
|
||||
timeTillEmpty: 60 * time.Second,
|
||||
want: 60 * time.Second,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := timeToWait(tt.wanted, tt.rem, tt.capacity, tt.timeTillEmpty); got != tt.want {
|
||||
t.Errorf("timeToWait() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -374,6 +374,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
|
||||
t.Run("no diverging blocks", func(t *testing.T) {
|
||||
p2 := connectPeerHavingBlocks(t, p1, knownBlocks, 64, p1.Peers())
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
defer func() {
|
||||
assert.NoError(t, p1.Disconnect(p2))
|
||||
}()
|
||||
@@ -385,6 +386,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
forkedSlot := types.Slot(24)
|
||||
altBlocks := extendBlockSequence(t, knownBlocks[:forkedSlot], 128)
|
||||
p2 := connectPeerHavingBlocks(t, p1, altBlocks, 128, p1.Peers())
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
defer func() {
|
||||
assert.NoError(t, p1.Disconnect(p2))
|
||||
}()
|
||||
@@ -397,6 +399,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
t.Run("first block is diverging - no common ancestor", func(t *testing.T) {
|
||||
altBlocks := extendBlockSequence(t, []*ethpb.SignedBeaconBlock{}, 128)
|
||||
p2 := connectPeerHavingBlocks(t, p1, altBlocks, 128, p1.Peers())
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
defer func() {
|
||||
assert.NoError(t, p1.Disconnect(p2))
|
||||
}()
|
||||
@@ -408,6 +411,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
forkedSlot := types.Slot(60)
|
||||
altBlocks := extendBlockSequence(t, knownBlocks[:forkedSlot], 128)
|
||||
p2 := connectPeerHavingBlocks(t, p1, altBlocks, 128, p1.Peers())
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
defer func() {
|
||||
assert.NoError(t, p1.Disconnect(p2))
|
||||
}()
|
||||
|
||||
@@ -179,7 +179,7 @@ func (q *blocksQueue) loop() {
|
||||
if startSlot > startBackSlots {
|
||||
startSlot -= startBackSlots
|
||||
}
|
||||
blocksPerRequest := q.blocksFetcher.blocksPerSecond
|
||||
blocksPerRequest := q.blocksFetcher.blocksPerPeriod
|
||||
for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
|
||||
q.smm.addStateMachine(i)
|
||||
}
|
||||
@@ -294,7 +294,7 @@ func (q *blocksQueue) onScheduleEvent(ctx context.Context) eventHandlerFn {
|
||||
m.setState(stateSkipped)
|
||||
return m.state, errSlotIsTooHigh
|
||||
}
|
||||
blocksPerRequest := q.blocksFetcher.blocksPerSecond
|
||||
blocksPerRequest := q.blocksFetcher.blocksPerPeriod
|
||||
if err := q.blocksFetcher.scheduleRequest(ctx, m.start, blocksPerRequest); err != nil {
|
||||
return m.state, err
|
||||
}
|
||||
|
||||
@@ -849,7 +849,7 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) {
|
||||
})
|
||||
|
||||
startSlot := queue.chain.HeadSlot()
|
||||
blocksPerRequest := queue.blocksFetcher.blocksPerSecond
|
||||
blocksPerRequest := queue.blocksFetcher.blocksPerPeriod
|
||||
for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
|
||||
queue.smm.addStateMachine(i).setState(stateSkipped)
|
||||
}
|
||||
@@ -877,7 +877,7 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) {
|
||||
assert.Equal(t, types.Slot(blockBatchLimit), queue.highestExpectedSlot)
|
||||
|
||||
startSlot := queue.chain.HeadSlot()
|
||||
blocksPerRequest := queue.blocksFetcher.blocksPerSecond
|
||||
blocksPerRequest := queue.blocksFetcher.blocksPerPeriod
|
||||
var machineSlots []types.Slot
|
||||
for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
|
||||
queue.smm.addStateMachine(i).setState(stateSkipped)
|
||||
@@ -928,7 +928,7 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) {
|
||||
assert.Equal(t, types.Slot(blockBatchLimit), queue.highestExpectedSlot)
|
||||
|
||||
startSlot := queue.chain.HeadSlot()
|
||||
blocksPerRequest := queue.blocksFetcher.blocksPerSecond
|
||||
blocksPerRequest := queue.blocksFetcher.blocksPerPeriod
|
||||
var machineSlots []types.Slot
|
||||
for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
|
||||
queue.smm.addStateMachine(i).setState(stateSkipped)
|
||||
@@ -1118,7 +1118,7 @@ func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) {
|
||||
p2p.Peers().SetChainState(emptyPeer, chainState)
|
||||
|
||||
startSlot := mc.HeadSlot() + 1
|
||||
blocksPerRequest := queue.blocksFetcher.blocksPerSecond
|
||||
blocksPerRequest := queue.blocksFetcher.blocksPerPeriod
|
||||
machineSlots := make([]types.Slot, 0)
|
||||
for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
|
||||
queue.smm.addStateMachine(i).setState(stateSkipped)
|
||||
@@ -1168,7 +1168,7 @@ func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) {
|
||||
// its claims with actual blocks.
|
||||
forkedPeer := connectPeerHavingBlocks(t, p2p, chain2, finalizedSlot, p2p.Peers())
|
||||
startSlot := mc.HeadSlot() + 1
|
||||
blocksPerRequest := queue.blocksFetcher.blocksPerSecond
|
||||
blocksPerRequest := queue.blocksFetcher.blocksPerPeriod
|
||||
machineSlots := make([]types.Slot, 0)
|
||||
for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
|
||||
queue.smm.addStateMachine(i).setState(stateSkipped)
|
||||
|
||||
@@ -22,7 +22,7 @@ func (q *blocksQueue) resetFromFork(fork *forkData) error {
|
||||
return errors.New("invalid first block in fork data")
|
||||
}
|
||||
|
||||
blocksPerRequest := q.blocksFetcher.blocksPerSecond
|
||||
blocksPerRequest := q.blocksFetcher.blocksPerPeriod
|
||||
if err := q.smm.removeAllStateMachines(); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -45,7 +45,7 @@ func (q *blocksQueue) resetFromFork(fork *forkData) error {
|
||||
// long periods with skipped slots).
|
||||
func (q *blocksQueue) resetFromSlot(ctx context.Context, startSlot types.Slot) error {
|
||||
// Shift start position of all the machines except for the last one.
|
||||
blocksPerRequest := q.blocksFetcher.blocksPerSecond
|
||||
blocksPerRequest := q.blocksFetcher.blocksPerPeriod
|
||||
if err := q.smm.removeAllStateMachines(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
@@ -77,7 +78,11 @@ func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{
|
||||
if blk.Block().IsBlinded() {
|
||||
blk, err = s.cfg.executionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get reconstruct full bellatrix block from blinded body")
|
||||
if errors.Is(err, execution.EmptyBlockHash) {
|
||||
log.WithError(err).Warn("Could not reconstruct block from header with syncing execution client. Waiting to complete syncing")
|
||||
} else {
|
||||
log.WithError(err).Error("Could not get reconstruct full block from blinded body")
|
||||
}
|
||||
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -4,15 +4,23 @@ import (
	"context"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
	opfeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/operation"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"google.golang.org/protobuf/proto"
)

func (s *Service) blsToExecutionChangeSubscriber(ctx context.Context, msg proto.Message) error {
func (s *Service) blsToExecutionChangeSubscriber(_ context.Context, msg proto.Message) error {
	blsMsg, ok := msg.(*ethpb.SignedBLSToExecutionChange)
	if !ok {
		return errors.Errorf("incorrect type of message received, wanted %T but got %T", &ethpb.SignedBLSToExecutionChange{}, msg)
	}
	s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
		Type: opfeed.BLSToExecutionChangeReceived,
		Data: &opfeed.BLSToExecutionChangeReceivedData{
			Change: blsMsg,
		},
	})
	s.cfg.blsToExecPool.InsertBLSToExecChange(blsMsg)
	return nil
}
@@ -60,7 +60,11 @@ func TestSubTopicHandler_CRUD(t *testing.T) {
|
||||
h.addTopic(topic, new(pubsub.Subscription))
|
||||
assert.Equal(t, true, h.topicExists(topic))
|
||||
|
||||
assert.Equal(t, 6, len(h.allTopics()))
|
||||
topic = fmt.Sprintf(p2p.BlsToExecutionChangeSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
h.addTopic(topic, new(pubsub.Subscription))
|
||||
assert.Equal(t, true, h.topicExists(topic))
|
||||
|
||||
assert.Equal(t, 7, len(h.allTopics()))
|
||||
|
||||
// Remove multiple topics
|
||||
topic = fmt.Sprintf(p2p.AttesterSlashingSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
@@ -76,7 +80,7 @@ func TestSubTopicHandler_CRUD(t *testing.T) {
|
||||
assert.Equal(t, false, h.topicExists(topic))
|
||||
|
||||
assert.Equal(t, true, h.digestExists(digest))
|
||||
assert.Equal(t, 3, len(h.allTopics()))
|
||||
assert.Equal(t, 4, len(h.allTopics()))
|
||||
|
||||
// Remove remaining topics.
|
||||
topic = fmt.Sprintf(p2p.BlockSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
@@ -91,6 +95,10 @@ func TestSubTopicHandler_CRUD(t *testing.T) {
|
||||
h.removeTopic(topic)
|
||||
assert.Equal(t, false, h.topicExists(topic))
|
||||
|
||||
topic = fmt.Sprintf(p2p.BlsToExecutionChangeSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
h.removeTopic(topic)
|
||||
assert.Equal(t, false, h.topicExists(topic))
|
||||
|
||||
assert.Equal(t, false, h.digestExists(digest))
|
||||
assert.Equal(t, 0, len(h.allTopics()))
|
||||
}
|
||||
|
||||
@@ -35,7 +35,7 @@ your flag since you're going to invert the flag in a later step. i.e you will us
later. For example, `--enable-my-feature`. Additionally, [create a feature flag tracking issue](https://github.com/prysmaticlabs/prysm/issues/new?template=feature_flag.md)
for your feature using the appropriate issue template.
2. Use the feature throughout the application to enable your new functionality and be sure to write
tests carefully and thoughtfully to ensure you have tested all of your new funcitonality without losing
tests carefully and thoughtfully to ensure you have tested all of your new functionality without losing
coverage on the existing functionality. This is considered an opt-in feature flag. Example usage:
```go
func someExistingMethod(ctx context.Context) error {
@@ -58,4 +58,4 @@ the config value in shared/featureconfig/config.go.
deprecate the opt-out feature flag, delete the config field from shared/featureconfig/config.go,
delete any deprecated / obsolete code paths.

Deprecated flags are deleted upon each major semver point release. Ex: v1, v2, v3.
Deprecated flags are deleted upon each major semver point release. Ex: v1, v2, v3.
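As an aside (not part of the diff): the hunk above cuts off right after the opening of the doc's example, so here is a fuller illustrative sketch of the opt-in pattern it describes. The EnableMyFeature field and the config/features import path are assumptions for illustration (the doc text still references shared/featureconfig), not something this change adds.

```go
package mypackage

import (
	"context"

	"github.com/prysmaticlabs/prysm/v3/config/features"
)

// someExistingMethod gates the new behavior behind the opt-in flag so the
// default code path stays untouched until --enable-my-feature is passed.
// EnableMyFeature is a hypothetical config field used only for illustration.
func someExistingMethod(ctx context.Context) error {
	if features.Get().EnableMyFeature {
		return newCodePath(ctx)
	}
	return existingCodePath(ctx)
}

func newCodePath(_ context.Context) error      { return nil } // placeholder for the gated feature
func existingCodePath(_ context.Context) error { return nil } // placeholder for current behavior
```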
@@ -149,8 +149,6 @@ type BeaconChainConfig struct {
|
||||
AltairForkEpoch types.Epoch `yaml:"ALTAIR_FORK_EPOCH" spec:"true"` // AltairForkEpoch is used to represent the assigned fork epoch for altair.
|
||||
BellatrixForkVersion []byte `yaml:"BELLATRIX_FORK_VERSION" spec:"true"` // BellatrixForkVersion is used to represent the fork version for bellatrix.
|
||||
BellatrixForkEpoch types.Epoch `yaml:"BELLATRIX_FORK_EPOCH" spec:"true"` // BellatrixForkEpoch is used to represent the assigned fork epoch for bellatrix.
|
||||
ShardingForkVersion []byte `yaml:"SHARDING_FORK_VERSION" spec:"true"` // ShardingForkVersion is used to represent the fork version for sharding.
|
||||
ShardingForkEpoch types.Epoch `yaml:"SHARDING_FORK_EPOCH" spec:"true"` // ShardingForkEpoch is used to represent the assigned fork epoch for sharding.
|
||||
CapellaForkVersion []byte `yaml:"CAPELLA_FORK_VERSION" spec:"true"` // CapellaForkVersion is used to represent the fork version for capella.
|
||||
CapellaForkEpoch types.Epoch `yaml:"CAPELLA_FORK_EPOCH" spec:"true"` // CapellaForkEpoch is used to represent the assigned fork epoch for capella.
|
||||
|
||||
@@ -221,26 +219,18 @@ func (b *BeaconChainConfig) InitializeForkSchedule() {
|
||||
|
||||
func configForkSchedule(b *BeaconChainConfig) map[[fieldparams.VersionLength]byte]types.Epoch {
|
||||
fvs := map[[fieldparams.VersionLength]byte]types.Epoch{}
|
||||
// Set Genesis fork data.
|
||||
fvs[bytesutil.ToBytes4(b.GenesisForkVersion)] = b.GenesisEpoch
|
||||
// Set Altair fork data.
|
||||
fvs[bytesutil.ToBytes4(b.AltairForkVersion)] = b.AltairForkEpoch
|
||||
// Set Bellatrix fork data.
|
||||
fvs[bytesutil.ToBytes4(b.BellatrixForkVersion)] = b.BellatrixForkEpoch
|
||||
// Set Capella fork data.
|
||||
fvs[bytesutil.ToBytes4(b.CapellaForkVersion)] = b.CapellaForkEpoch
|
||||
return fvs
|
||||
}
|
||||
|
||||
func configForkNames(b *BeaconChainConfig) map[[fieldparams.VersionLength]byte]string {
|
||||
fvn := map[[fieldparams.VersionLength]byte]string{}
|
||||
// Set Genesis fork data.
|
||||
fvn[bytesutil.ToBytes4(b.GenesisForkVersion)] = "phase0"
|
||||
// Set Altair fork data.
|
||||
fvn[bytesutil.ToBytes4(b.AltairForkVersion)] = "altair"
|
||||
// Set Bellatrix fork data.
|
||||
fvn[bytesutil.ToBytes4(b.BellatrixForkVersion)] = "bellatrix"
|
||||
// Set Capella fork data.
|
||||
fvn[bytesutil.ToBytes4(b.CapellaForkVersion)] = "capella"
|
||||
return fvn
|
||||
}
|
||||
|
||||
@@ -170,8 +170,6 @@ func compareConfigs(t *testing.T, expected, actual *BeaconChainConfig) {
|
||||
require.DeepEqual(t, expected.AltairForkEpoch, actual.AltairForkEpoch)
|
||||
require.DeepEqual(t, expected.BellatrixForkVersion, actual.BellatrixForkVersion)
|
||||
require.DeepEqual(t, expected.BellatrixForkEpoch, actual.BellatrixForkEpoch)
|
||||
require.DeepEqual(t, expected.ShardingForkVersion, actual.ShardingForkVersion)
|
||||
require.DeepEqual(t, expected.ShardingForkEpoch, actual.ShardingForkEpoch)
|
||||
require.DeepEqual(t, expected.ForkVersionSchedule, actual.ForkVersionSchedule)
|
||||
require.DeepEqual(t, expected.SafetyDecay, actual.SafetyDecay)
|
||||
require.DeepEqual(t, expected.TimelySourceFlagIndex, actual.TimelySourceFlagIndex)
|
||||
|
||||
@@ -10,7 +10,6 @@ func InteropConfig() *BeaconChainConfig {
|
||||
c.AltairForkVersion = []byte{1, 0, 0, 235}
|
||||
c.BellatrixForkVersion = []byte{2, 0, 0, 235}
|
||||
c.CapellaForkVersion = []byte{3, 0, 0, 235}
|
||||
c.ShardingForkVersion = []byte{4, 0, 0, 235}
|
||||
|
||||
c.InitializeForkSchedule()
|
||||
return c
|
||||
|
||||
@@ -196,10 +196,9 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
|
||||
fmt.Sprintf("DEPOSIT_NETWORK_ID: %d", cfg.DepositNetworkID),
|
||||
fmt.Sprintf("ALTAIR_FORK_EPOCH: %d", cfg.AltairForkEpoch),
|
||||
fmt.Sprintf("ALTAIR_FORK_VERSION: %#x", cfg.AltairForkVersion),
|
||||
fmt.Sprintf("CAPELLA_FORK_VERSION: %#x", cfg.CapellaForkVersion),
|
||||
fmt.Sprintf("BELLATRIX_FORK_EPOCH: %d", cfg.BellatrixForkEpoch),
|
||||
fmt.Sprintf("BELLATRIX_FORK_VERSION: %#x", cfg.BellatrixForkVersion),
|
||||
fmt.Sprintf("SHARDING_FORK_EPOCH: %d", cfg.ShardingForkEpoch),
|
||||
fmt.Sprintf("SHARDING_FORK_VERSION: %#x", cfg.ShardingForkVersion),
|
||||
fmt.Sprintf("INACTIVITY_SCORE_BIAS: %d", cfg.InactivityScoreBias),
|
||||
fmt.Sprintf("INACTIVITY_SCORE_RECOVERY_RATE: %d", cfg.InactivityScoreRecoveryRate),
|
||||
fmt.Sprintf("TERMINAL_TOTAL_DIFFICULTY: %s", cfg.TerminalTotalDifficulty),
|
||||
|
||||
@@ -107,13 +107,15 @@ func assertEqualConfigs(t *testing.T, name string, fields []string, expected, ac
|
||||
assert.Equal(t, expected.DomainVoluntaryExit, actual.DomainVoluntaryExit, "%s: DomainVoluntaryExit", name)
|
||||
assert.Equal(t, expected.DomainSelectionProof, actual.DomainSelectionProof, "%s: DomainSelectionProof", name)
|
||||
assert.Equal(t, expected.DomainAggregateAndProof, actual.DomainAggregateAndProof, "%s: DomainAggregateAndProof", name)
|
||||
assert.Equal(t, expected.TerminalTotalDifficulty, actual.TerminalTotalDifficulty, "%s: DomainAggregateAndProof", name)
|
||||
assert.Equal(t, expected.AltairForkEpoch, actual.AltairForkEpoch, "%s: DomainAggregateAndProof", name)
|
||||
assert.Equal(t, expected.BellatrixForkEpoch, actual.BellatrixForkEpoch, "%s: DomainAggregateAndProof", name)
|
||||
assert.Equal(t, expected.SqrRootSlotsPerEpoch, actual.SqrRootSlotsPerEpoch, "%s: DomainAggregateAndProof", name)
|
||||
assert.DeepEqual(t, expected.GenesisForkVersion, actual.GenesisForkVersion, "%s: DomainAggregateAndProof", name)
|
||||
assert.DeepEqual(t, expected.AltairForkVersion, actual.AltairForkVersion, "%s: DomainAggregateAndProof", name)
|
||||
assert.DeepEqual(t, expected.BellatrixForkVersion, actual.BellatrixForkVersion, "%s: DomainAggregateAndProof", name)
|
||||
assert.Equal(t, expected.TerminalTotalDifficulty, actual.TerminalTotalDifficulty, "%s: TerminalTotalDifficulty", name)
|
||||
assert.Equal(t, expected.AltairForkEpoch, actual.AltairForkEpoch, "%s: AltairForkEpoch", name)
|
||||
assert.Equal(t, expected.BellatrixForkEpoch, actual.BellatrixForkEpoch, "%s: BellatrixForkEpoch", name)
|
||||
assert.Equal(t, expected.CapellaForkEpoch, actual.CapellaForkEpoch, "%s: CapellaForkEpoch", name)
|
||||
assert.Equal(t, expected.SqrRootSlotsPerEpoch, actual.SqrRootSlotsPerEpoch, "%s: SqrRootSlotsPerEpoch", name)
|
||||
assert.DeepEqual(t, expected.GenesisForkVersion, actual.GenesisForkVersion, "%s: GenesisForkVersion", name)
|
||||
assert.DeepEqual(t, expected.AltairForkVersion, actual.AltairForkVersion, "%s: AltairForkVersion", name)
|
||||
assert.DeepEqual(t, expected.BellatrixForkVersion, actual.BellatrixForkVersion, "%s: BellatrixForkVersion", name)
|
||||
assert.DeepEqual(t, expected.CapellaForkVersion, actual.CapellaForkVersion, "%s: CapellaForkVersion", name)
|
||||
|
||||
assertYamlFieldsMatch(t, name, fields, expected, actual)
|
||||
}
|
||||
@@ -324,6 +326,9 @@ func fieldsFromYamls(t *testing.T, fps []string) []string {
|
||||
require.NoError(t, yaml.Unmarshal(yamlFile, &m))
|
||||
|
||||
for k := range m {
|
||||
if k == "SHARDING_FORK_VERSION" || k == "SHARDING_FORK_EPOCH" {
|
||||
continue
|
||||
}
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
|
||||
@@ -210,8 +210,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
BellatrixForkEpoch: mainnetBellatrixForkEpoch,
|
||||
CapellaForkVersion: []byte{3, 0, 0, 0},
|
||||
CapellaForkEpoch: math.MaxUint64,
|
||||
ShardingForkVersion: []byte{4, 0, 0, 0},
|
||||
ShardingForkEpoch: math.MaxUint64,
|
||||
|
||||
// New values introduced in Altair hard fork 1.
|
||||
// Participation flag indices.
|
||||
@@ -273,21 +271,21 @@ func MainnetTestConfig() *BeaconChainConfig {
|
||||
return mn
|
||||
}
|
||||
|
||||
// FillTestVersions replaces the byte in the last position of each fork version
|
||||
// so that
|
||||
// FillTestVersions replaces the fork schedule in the given BeaconChainConfig with test values, using the given
|
||||
// byte argument as the high byte (common across forks).
|
||||
func FillTestVersions(c *BeaconChainConfig, b byte) {
|
||||
c.GenesisForkVersion = make([]byte, fieldparams.VersionLength)
|
||||
c.AltairForkVersion = make([]byte, fieldparams.VersionLength)
|
||||
c.BellatrixForkVersion = make([]byte, fieldparams.VersionLength)
|
||||
c.ShardingForkVersion = make([]byte, fieldparams.VersionLength)
|
||||
c.CapellaForkVersion = make([]byte, fieldparams.VersionLength)
|
||||
|
||||
c.GenesisForkVersion[fieldparams.VersionLength-1] = b
|
||||
c.AltairForkVersion[fieldparams.VersionLength-1] = b
|
||||
c.BellatrixForkVersion[fieldparams.VersionLength-1] = b
|
||||
c.ShardingForkVersion[fieldparams.VersionLength-1] = b
|
||||
c.CapellaForkVersion[fieldparams.VersionLength-1] = b
|
||||
|
||||
c.GenesisForkVersion[0] = 0
|
||||
c.AltairForkVersion[0] = 1
|
||||
c.BellatrixForkVersion[0] = 2
|
||||
c.ShardingForkVersion[0] = 3
|
||||
c.CapellaForkVersion[0] = 3
|
||||
}
|
||||
|
||||
@@ -89,8 +89,6 @@ func MinimalSpecConfig() *BeaconChainConfig {
|
||||
minimalConfig.BellatrixForkEpoch = math.MaxUint64
|
||||
minimalConfig.CapellaForkVersion = []byte{3, 0, 0, 1}
|
||||
minimalConfig.CapellaForkEpoch = math.MaxUint64
|
||||
minimalConfig.ShardingForkVersion = []byte{4, 0, 0, 1}
|
||||
minimalConfig.ShardingForkEpoch = math.MaxUint64
|
||||
|
||||
minimalConfig.SyncCommitteeSize = 32
|
||||
minimalConfig.InactivityScoreBias = 4
|
||||
|
||||
config/params/testdata/e2e_config.yaml (vendored, 6 lines changed)
@@ -38,9 +38,9 @@ ALTAIR_FORK_EPOCH: 6 # Override for e2e
# Bellatrix
BELLATRIX_FORK_VERSION: 0x020000fd
BELLATRIX_FORK_EPOCH: 8
# Sharding
SHARDING_FORK_VERSION: 0x030000fd
SHARDING_FORK_EPOCH: 18446744073709551615
# Capella
CAPELLA_FORK_VERSION: 0x030000fd
CAPELLA_FORK_EPOCH: 18446744073709551615


# Time parameters
@@ -135,8 +135,6 @@ func compareConfigs(t *testing.T, expected, actual *params.BeaconChainConfig) {
|
||||
require.DeepEqual(t, expected.AltairForkEpoch, actual.AltairForkEpoch)
|
||||
require.DeepEqual(t, expected.BellatrixForkVersion, actual.BellatrixForkVersion)
|
||||
require.DeepEqual(t, expected.BellatrixForkEpoch, actual.BellatrixForkEpoch)
|
||||
require.DeepEqual(t, expected.ShardingForkVersion, actual.ShardingForkVersion)
|
||||
require.DeepEqual(t, expected.ShardingForkEpoch, actual.ShardingForkEpoch)
|
||||
require.DeepEqual(t, expected.ForkVersionSchedule, actual.ForkVersionSchedule)
|
||||
require.DeepEqual(t, expected.SafetyDecay, actual.SafetyDecay)
|
||||
require.DeepEqual(t, expected.TimelySourceFlagIndex, actual.TimelySourceFlagIndex)
|
||||
|
||||
@@ -44,7 +44,6 @@ func E2ETestConfig() *BeaconChainConfig {
|
||||
e2eConfig.AltairForkVersion = []byte{1, 0, 0, 253}
|
||||
e2eConfig.BellatrixForkVersion = []byte{2, 0, 0, 253}
|
||||
e2eConfig.CapellaForkVersion = []byte{3, 0, 0, 253}
|
||||
e2eConfig.ShardingForkVersion = []byte{4, 0, 0, 253}
|
||||
|
||||
e2eConfig.InitializeForkSchedule()
|
||||
return e2eConfig
|
||||
@@ -83,7 +82,6 @@ func E2EMainnetTestConfig() *BeaconChainConfig {
|
||||
e2eConfig.AltairForkVersion = []byte{1, 0, 0, 254}
|
||||
e2eConfig.BellatrixForkVersion = []byte{2, 0, 0, 254}
|
||||
e2eConfig.CapellaForkVersion = []byte{3, 0, 0, 254}
|
||||
e2eConfig.ShardingForkVersion = []byte{4, 0, 0, 254}
|
||||
|
||||
e2eConfig.InitializeForkSchedule()
|
||||
return e2eConfig
|
||||
|
||||
@@ -37,7 +37,6 @@ func PraterConfig() *BeaconChainConfig {
|
||||
cfg.AltairForkEpoch = 36660
|
||||
cfg.AltairForkVersion = []byte{0x1, 0x0, 0x10, 0x20}
|
||||
cfg.CapellaForkVersion = []byte{0x3, 0x0, 0x10, 0x20}
|
||||
cfg.ShardingForkVersion = []byte{0x4, 0x0, 0x10, 0x20}
|
||||
cfg.BellatrixForkEpoch = 112260
|
||||
cfg.BellatrixForkVersion = []byte{0x2, 0x0, 0x10, 0x20}
|
||||
cfg.TerminalTotalDifficulty = "10790000"
|
||||
|
||||
@@ -33,8 +33,10 @@ func SepoliaConfig() *BeaconChainConfig {
|
||||
cfg.AltairForkVersion = []byte{0x90, 0x00, 0x00, 0x70}
|
||||
cfg.BellatrixForkEpoch = 100
|
||||
cfg.BellatrixForkVersion = []byte{0x90, 0x00, 0x00, 0x71}
|
||||
cfg.CapellaForkVersion = []byte{0x90, 0x00, 0x00, 0x72}
|
||||
cfg.TerminalTotalDifficulty = "17000000000000000"
|
||||
cfg.DepositContractAddress = "0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D"
|
||||
cfg.CapellaForkVersion = []byte{0x90, 0x00, 0x00, 0x72}
|
||||
cfg.InitializeForkSchedule()
|
||||
return cfg
|
||||
}
|
||||
|
||||
@@ -22,10 +22,6 @@ func (m SignedBeaconBlock) Block() interfaces.BeaconBlock {
|
||||
return m.BeaconBlock
|
||||
}
|
||||
|
||||
func (SignedBeaconBlock) SetBlock(interfaces.BeaconBlock) error {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (SignedBeaconBlock) Signature() [field_params.BLSSignatureLength]byte {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
@@ -2,7 +2,15 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["bytes.go"],
|
||||
srcs = [
|
||||
"bits.go",
|
||||
"bytes.go",
|
||||
"bytes_go120.go",
|
||||
"bytes_legacy.go",
|
||||
"eth_types.go",
|
||||
"hex.go",
|
||||
"integers.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/encoding/bytesutil",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
@@ -15,7 +23,13 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = ["bytes_test.go"],
|
||||
srcs = [
|
||||
"bits_test.go",
|
||||
"bytes_test.go",
|
||||
"eth_types_test.go",
|
||||
"hex_test.go",
|
||||
"integers_test.go",
|
||||
],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
|
||||
90 encoding/bytesutil/bits.go Normal file
@@ -0,0 +1,90 @@
package bytesutil

import (
	"math/bits"

	"github.com/pkg/errors"
)

// SetBit sets the index `i` of bitlist `b` to 1.
// It grows and returns a longer bitlist with 1 set
// if index `i` is out of range.
func SetBit(b []byte, i int) []byte {
	if i >= len(b)*8 {
		h := (i + (8 - i%8)) / 8
		b = append(b, make([]byte, h-len(b))...)
	}

	bit := uint8(1 << (i % 8))
	b[i/8] |= bit
	return b
}

// ClearBit clears the index `i` of bitlist `b`.
// Returns the original bitlist if the index `i`
// is out of range.
func ClearBit(b []byte, i int) []byte {
	if i >= len(b)*8 || i < 0 {
		return b
	}

	bit := uint8(1 << (i % 8))
	b[i/8] &^= bit
	return b
}

// MakeEmptyBitlists returns an empty bitlist with
// input size `i`.
func MakeEmptyBitlists(i int) []byte {
	return make([]byte, (i+(8-i%8))/8)
}

// HighestBitIndex returns the index of the highest
// bit set from bitlist `b`.
func HighestBitIndex(b []byte) (int, error) {
	if len(b) == 0 {
		return 0, errors.New("input list can't be empty or nil")
	}

	for i := len(b) - 1; i >= 0; i-- {
		if b[i] == 0 {
			continue
		}
		return bits.Len8(b[i]) + (i * 8), nil
	}

	return 0, nil
}

// HighestBitIndexAt returns the index of the highest
// bit set from bitlist `b` that is at `index` (inclusive).
func HighestBitIndexAt(b []byte, index int) (int, error) {
	bLength := len(b)
	if b == nil || bLength == 0 {
		return 0, errors.New("input list can't be empty or nil")
	}
	if index < 0 {
		return 0, errors.Errorf("index is negative: %d", index)
	}

	start := index / 8
	if start >= bLength {
		start = bLength - 1
	}

	mask := byte(1<<(index%8) - 1)
	for i := start; i >= 0; i-- {
		if index/8 > i {
			mask = 0xff
		}
		masked := b[i] & mask
		minBitsMasked := bits.Len8(masked)
		if b[i] == 0 || (minBitsMasked == 0 && index/8 <= i) {
			continue
		}

		return minBitsMasked + (i * 8), nil
	}

	return 0, nil
}
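A short usage sketch (not part of the diff) for the bit helpers added above. It assumes the prysm v3 module is available so the bytesutil import path shown in the tests resolves; the expected outputs follow directly from the implementations.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
)

func main() {
	// Start from a single zero byte and set bit 9; SetBit grows the list to two bytes.
	b := bytesutil.SetBit([]byte{0b00000000}, 9)
	fmt.Printf("%08b\n", b) // [00000000 00000010]

	// Clearing an out-of-range bit is a no-op and returns the list unchanged.
	b = bytesutil.ClearBit(b, 100)

	// HighestBitIndex uses bits.Len8-style numbering: the highest set bit is
	// bit 9 (0-based), so it is reported as 10.
	idx, err := bytesutil.HighestBitIndex(b)
	if err != nil {
		panic(err)
	}
	fmt.Println(idx) // 10
}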
139 encoding/bytesutil/bits_test.go Normal file
@@ -0,0 +1,139 @@
|
||||
package bytesutil_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
)
|
||||
|
||||
func TestSetBit(t *testing.T) {
|
||||
tests := []struct {
|
||||
a []byte
|
||||
b int
|
||||
c []byte
|
||||
}{
|
||||
{[]byte{0b00000000}, 1, []byte{0b00000010}},
|
||||
{[]byte{0b00000010}, 7, []byte{0b10000010}},
|
||||
{[]byte{0b10000010}, 9, []byte{0b10000010, 0b00000010}},
|
||||
{[]byte{0b10000010}, 27, []byte{0b10000010, 0b00000000, 0b00000000, 0b00001000}},
|
||||
{[]byte{0b10000010, 0b00000000}, 8, []byte{0b10000010, 0b00000001}},
|
||||
{[]byte{0b10000010, 0b00000000}, 31, []byte{0b10000010, 0b00000000, 0b00000000, 0b10000000}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
assert.DeepEqual(t, tt.c, bytesutil.SetBit(tt.a, tt.b))
|
||||
}
|
||||
}
|
||||
|
||||
func TestClearBit(t *testing.T) {
|
||||
tests := []struct {
|
||||
a []byte
|
||||
b int
|
||||
c []byte
|
||||
}{
|
||||
{[]byte{0b00000000}, 1, []byte{0b00000000}},
|
||||
{[]byte{0b00000010}, 1, []byte{0b00000000}},
|
||||
{[]byte{0b10000010}, 1, []byte{0b10000000}},
|
||||
{[]byte{0b10000010}, 8, []byte{0b10000010}},
|
||||
{[]byte{0b10000010, 0b00001111}, 7, []byte{0b00000010, 0b00001111}},
|
||||
{[]byte{0b10000010, 0b00001111}, 10, []byte{0b10000010, 0b00001011}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
assert.DeepEqual(t, tt.c, bytesutil.ClearBit(tt.a, tt.b))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMakeEmptyBitlists(t *testing.T) {
|
||||
tests := []struct {
|
||||
a int
|
||||
b int
|
||||
}{
|
||||
{0, 1},
|
||||
{1, 1},
|
||||
{2, 1},
|
||||
{7, 1},
|
||||
{8, 2},
|
||||
{15, 2},
|
||||
{16, 3},
|
||||
{100, 13},
|
||||
{104, 14},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
assert.DeepEqual(t, tt.b, len(bytesutil.MakeEmptyBitlists(tt.a)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHighestBitIndex(t *testing.T) {
|
||||
tests := []struct {
|
||||
a []byte
|
||||
b int
|
||||
error bool
|
||||
}{
|
||||
{nil, 0, true},
|
||||
{[]byte{}, 0, true},
|
||||
{[]byte{0b00000001}, 1, false},
|
||||
{[]byte{0b10100101}, 8, false},
|
||||
{[]byte{0x00, 0x00}, 0, false},
|
||||
{[]byte{0xff, 0xa0}, 16, false},
|
||||
{[]byte{12, 34, 56, 78}, 31, false},
|
||||
{[]byte{255, 255, 255, 255}, 32, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
i, err := bytesutil.HighestBitIndex(tt.a)
|
||||
if !tt.error {
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, tt.b, i)
|
||||
} else {
|
||||
assert.ErrorContains(t, "input list can't be empty or nil", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHighestBitIndexBelow(t *testing.T) {
|
||||
tests := []struct {
|
||||
a []byte
|
||||
b int
|
||||
c int
|
||||
error bool
|
||||
}{
|
||||
{nil, 0, 0, true},
|
||||
{[]byte{}, 0, 0, true},
|
||||
{[]byte{0b00010001}, 0, 0, false},
|
||||
{[]byte{0b00010001}, 1, 1, false},
|
||||
{[]byte{0b00010001}, 2, 1, false},
|
||||
{[]byte{0b00010001}, 4, 1, false},
|
||||
{[]byte{0b00010001}, 5, 5, false},
|
||||
{[]byte{0b00010001}, 8, 5, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 0, 0, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 1, 1, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 2, 1, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 4, 1, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 5, 5, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 8, 5, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 15, 5, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 16, 5, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 8, 5, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 9, 5, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 10, 10, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 11, 10, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 14, 14, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 15, 14, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 24, 14, false},
|
||||
{[]byte{0b00010001, 0b00100010, 0b10000000}, 23, 14, false},
|
||||
{[]byte{0b00010001, 0b00100010, 0b10000000}, 24, 24, false},
|
||||
{[]byte{0b00000000, 0b00000001, 0b00000011}, 17, 17, false},
|
||||
{[]byte{0b00000000, 0b00000001, 0b00000011}, 18, 18, false},
|
||||
{[]byte{12, 34, 56, 78}, 1000, 31, false},
|
||||
{[]byte{255, 255, 255, 255}, 1000, 32, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
i, err := bytesutil.HighestBitIndexAt(tt.a, tt.b)
|
||||
if !tt.error {
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, tt.c, i)
|
||||
} else {
|
||||
assert.ErrorContains(t, "input list can't be empty or nil", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2,131 +2,9 @@
|
||||
package bytesutil
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"math/bits"
|
||||
"regexp"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
)
|
||||
|
||||
var hexRegex = regexp.MustCompile("^0x[0-9a-fA-F]+$")
|
||||
|
||||
// ToBytes returns integer x to bytes in little-endian format at the specified length.
|
||||
// Spec defines similar method uint_to_bytes(n: uint) -> bytes, which is equivalent to ToBytes(n, 8).
|
||||
func ToBytes(x uint64, length int) []byte {
|
||||
if length < 0 {
|
||||
length = 0
|
||||
}
|
||||
makeLength := length
|
||||
if length < 8 {
|
||||
makeLength = 8
|
||||
}
|
||||
bytes := make([]byte, makeLength)
|
||||
binary.LittleEndian.PutUint64(bytes, x)
|
||||
return bytes[:length]
|
||||
}
|
||||
|
||||
// Bytes1 returns integer x to bytes in little-endian format, x.to_bytes(1, 'little').
|
||||
func Bytes1(x uint64) []byte {
|
||||
bytes := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(bytes, x)
|
||||
return bytes[:1]
|
||||
}
|
||||
|
||||
// Bytes2 returns integer x to bytes in little-endian format, x.to_bytes(2, 'little').
|
||||
func Bytes2(x uint64) []byte {
|
||||
bytes := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(bytes, x)
|
||||
return bytes[:2]
|
||||
}
|
||||
|
||||
// Bytes3 returns integer x to bytes in little-endian format, x.to_bytes(3, 'little').
|
||||
func Bytes3(x uint64) []byte {
|
||||
bytes := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(bytes, x)
|
||||
return bytes[:3]
|
||||
}
|
||||
|
||||
// Bytes4 returns integer x to bytes in little-endian format, x.to_bytes(4, 'little').
|
||||
func Bytes4(x uint64) []byte {
|
||||
bytes := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(bytes, x)
|
||||
return bytes[:4]
|
||||
}
|
||||
|
||||
// Bytes8 returns integer x to bytes in little-endian format, x.to_bytes(8, 'little').
|
||||
func Bytes8(x uint64) []byte {
|
||||
bytes := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(bytes, x)
|
||||
return bytes
|
||||
}
|
||||
|
||||
// Bytes32 returns integer x to bytes in little-endian format, x.to_bytes(32, 'little').
|
||||
func Bytes32(x uint64) []byte {
|
||||
bytes := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(bytes, x)
|
||||
return bytes
|
||||
}
|
||||
|
||||
// FromBytes4 returns an integer which is stored in the little-endian format(4, 'little')
|
||||
// from a byte array.
|
||||
func FromBytes4(x []byte) uint64 {
|
||||
if len(x) < 4 {
|
||||
return 0
|
||||
}
|
||||
empty4bytes := make([]byte, 4)
|
||||
return binary.LittleEndian.Uint64(append(x[:4], empty4bytes...))
|
||||
}
|
||||
|
||||
// FromBytes8 returns an integer which is stored in the little-endian format(8, 'little')
|
||||
// from a byte array.
|
||||
func FromBytes8(x []byte) uint64 {
|
||||
if len(x) < 8 {
|
||||
return 0
|
||||
}
|
||||
return binary.LittleEndian.Uint64(x)
|
||||
}
|
||||
|
||||
// ToBytes4 is a convenience method for converting a byte slice to a fix
|
||||
// sized 4 byte array. This method will truncate the input if it is larger
|
||||
// than 4 bytes.
|
||||
func ToBytes4(x []byte) [4]byte {
|
||||
var y [4]byte
|
||||
copy(y[:], x)
|
||||
return y
|
||||
}
|
||||
|
||||
// ToBytes20 is a convenience method for converting a byte slice to a fix
|
||||
// sized 20 byte array. This method will truncate the input if it is larger
|
||||
// than 20 bytes.
|
||||
func ToBytes20(x []byte) [20]byte {
|
||||
var y [20]byte
|
||||
copy(y[:], x)
|
||||
return y
|
||||
}
|
||||
|
||||
// ToBytes32 is a convenience method for converting a byte slice to a fix
|
||||
// sized 32 byte array. This method will truncate the input if it is larger
|
||||
// than 32 bytes.
|
||||
func ToBytes32(x []byte) [32]byte {
|
||||
var y [32]byte
|
||||
copy(y[:], x)
|
||||
return y
|
||||
}
|
||||
|
||||
// ToBytes48 is a convenience method for converting a byte slice to a fix
|
||||
// sized 48 byte array. This method will truncate the input if it is larger
|
||||
// than 48 bytes.
|
||||
func ToBytes48(x []byte) [48]byte {
|
||||
var y [48]byte
|
||||
copy(y[:], x)
|
||||
return y
|
||||
}
|
||||
|
||||
// ToBytes48Array is a convenience method for converting an array of
|
||||
// byte slices to an array of fixed-sized byte arrays.
|
||||
func ToBytes48Array(x [][]byte) [][48]byte {
|
||||
@@ -137,39 +15,12 @@ func ToBytes48Array(x [][]byte) [][48]byte {
|
||||
return y
|
||||
}
|
||||
|
||||
// ToBytes64 is a convenience method for converting a byte slice to a fix
|
||||
// sized 64 byte array. This method will truncate the input if it is larger
|
||||
// than 64 bytes.
|
||||
func ToBytes64(x []byte) [64]byte {
|
||||
var y [64]byte
|
||||
copy(y[:], x)
|
||||
return y
|
||||
}
|
||||
|
||||
// ToBytes96 is a convenience method for converting a byte slice to a fix
|
||||
// sized 96 byte array. This method will truncate the input if it is larger
|
||||
// than 96 bytes.
|
||||
func ToBytes96(x []byte) [96]byte {
|
||||
var y [96]byte
|
||||
copy(y[:], x)
|
||||
return y
|
||||
}
|
||||
|
||||
// ToBool is a convenience method for converting a byte to a bool.
|
||||
// This method will use the first bit of the 0 byte to generate the returned value.
|
||||
func ToBool(x byte) bool {
|
||||
return x&1 == 1
|
||||
}
|
||||
|
||||
// FromBytes2 returns an integer which is stored in the little-endian format(2, 'little')
|
||||
// from a byte array.
|
||||
func FromBytes2(x []byte) uint16 {
|
||||
if len(x) < 2 {
|
||||
return 0
|
||||
}
|
||||
return binary.LittleEndian.Uint16(x[:2])
|
||||
}
|
||||
|
||||
// FromBool is a convenience method for converting a bool to a byte.
|
||||
// This method will use the first bit to generate the returned value.
|
||||
func FromBool(x bool) byte {
|
||||
@@ -203,16 +54,6 @@ func Trunc(x []byte) []byte {
|
||||
return x
|
||||
}
|
||||
|
||||
// ToLowInt64 returns the lowest 8 bytes interpreted as little endian.
|
||||
func ToLowInt64(x []byte) int64 {
|
||||
if len(x) < 8 {
|
||||
return 0
|
||||
}
|
||||
// Use the first 8 bytes.
|
||||
x = x[:8]
|
||||
return int64(binary.LittleEndian.Uint64(x)) // lint:ignore uintcast -- A negative number might be the expected result.
|
||||
}
|
||||
|
||||
// SafeCopyRootAtIndex takes a copy of an 32-byte slice in a slice of byte slices. Returns error if index out of range.
|
||||
func SafeCopyRootAtIndex(input [][]byte, idx uint64) ([]byte, error) {
|
||||
if input == nil {
|
||||
@@ -227,7 +68,7 @@ func SafeCopyRootAtIndex(input [][]byte, idx uint64) ([]byte, error) {
|
||||
return item, nil
|
||||
}
|
||||
|
||||
// SafeCopyBytes will copy and return a non-nil byte array, otherwise it returns nil.
|
||||
// SafeCopyBytes will copy and return a non-nil byte slice, otherwise it returns nil.
|
||||
func SafeCopyBytes(cp []byte) []byte {
|
||||
if cp != nil {
|
||||
copied := make([]byte, len(cp))
|
||||
@@ -237,7 +78,7 @@ func SafeCopyBytes(cp []byte) []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SafeCopy2dBytes will copy and return a non-nil 2d byte array, otherwise it returns nil.
|
||||
// SafeCopy2dBytes will copy and return a non-nil 2d byte slice, otherwise it returns nil.
|
||||
func SafeCopy2dBytes(ary [][]byte) [][]byte {
|
||||
if ary != nil {
|
||||
copied := make([][]byte, len(ary))
|
||||
@@ -249,7 +90,7 @@ func SafeCopy2dBytes(ary [][]byte) [][]byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SafeCopy2d32Bytes will copy and return a non-nil 2d byte array, otherwise it returns nil.
|
||||
// SafeCopy2d32Bytes will copy and return a non-nil 2d byte slice, otherwise it returns nil.
|
||||
func SafeCopy2d32Bytes(ary [][32]byte) [][32]byte {
|
||||
if ary != nil {
|
||||
copied := make([][32]byte, len(ary))
|
||||
@@ -270,159 +111,12 @@ func ReverseBytes32Slice(arr [][32]byte) [][32]byte {
|
||||
// PadTo pads a byte slice to the given size. If the byte slice is larger than the given size, the
|
||||
// original slice is returned.
|
||||
func PadTo(b []byte, size int) []byte {
|
||||
if len(b) > size {
|
||||
if len(b) >= size {
|
||||
return b
|
||||
}
|
||||
return append(b, make([]byte, size-len(b))...)
|
||||
}
|
||||
|
||||
// SetBit sets the index `i` of bitlist `b` to 1.
|
||||
// It grows and returns a longer bitlist with 1 set
|
||||
// if index `i` is out of range.
|
||||
func SetBit(b []byte, i int) []byte {
|
||||
if i >= len(b)*8 {
|
||||
h := (i + (8 - i%8)) / 8
|
||||
b = append(b, make([]byte, h-len(b))...)
|
||||
}
|
||||
|
||||
bit := uint8(1 << (i % 8))
|
||||
b[i/8] |= bit
|
||||
return b
|
||||
}
|
||||
|
||||
// ClearBit clears the index `i` of bitlist `b`.
|
||||
// Returns the original bitlist if the index `i`
|
||||
// is out of range.
|
||||
func ClearBit(b []byte, i int) []byte {
|
||||
if i >= len(b)*8 || i < 0 {
|
||||
return b
|
||||
}
|
||||
|
||||
bit := uint8(1 << (i % 8))
|
||||
b[i/8] &^= bit
|
||||
return b
|
||||
}
|
||||
|
||||
// MakeEmptyBitlists returns an empty bitlist with
|
||||
// input size `i`.
|
||||
func MakeEmptyBitlists(i int) []byte {
|
||||
return make([]byte, (i+(8-i%8))/8)
|
||||
}
|
||||
|
||||
// HighestBitIndex returns the index of the highest
|
||||
// bit set from bitlist `b`.
|
||||
func HighestBitIndex(b []byte) (int, error) {
|
||||
if len(b) == 0 {
|
||||
return 0, errors.New("input list can't be empty or nil")
|
||||
}
|
||||
|
||||
for i := len(b) - 1; i >= 0; i-- {
|
||||
if b[i] == 0 {
|
||||
continue
|
||||
}
|
||||
return bits.Len8(b[i]) + (i * 8), nil
|
||||
}
|
||||
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// HighestBitIndexAt returns the index of the highest
|
||||
// bit set from bitlist `b` that is at `index` (inclusive).
|
||||
func HighestBitIndexAt(b []byte, index int) (int, error) {
|
||||
bLength := len(b)
|
||||
if b == nil || bLength == 0 {
|
||||
return 0, errors.New("input list can't be empty or nil")
|
||||
}
|
||||
if index < 0 {
|
||||
return 0, errors.Errorf("index is negative: %d", index)
|
||||
}
|
||||
|
||||
start := index / 8
|
||||
if start >= bLength {
|
||||
start = bLength - 1
|
||||
}
|
||||
|
||||
mask := byte(1<<(index%8) - 1)
|
||||
for i := start; i >= 0; i-- {
|
||||
if index/8 > i {
|
||||
mask = 0xff
|
||||
}
|
||||
masked := b[i] & mask
|
||||
minBitsMasked := bits.Len8(masked)
|
||||
if b[i] == 0 || (minBitsMasked == 0 && index/8 <= i) {
|
||||
continue
|
||||
}
|
||||
|
||||
return minBitsMasked + (i * 8), nil
|
||||
}
|
||||
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Uint32ToBytes4 is a convenience method for converting uint32 to a fix
|
||||
// sized 4 byte array in big endian order. Returns 4 byte array.
|
||||
func Uint32ToBytes4(i uint32) [4]byte {
|
||||
buf := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(buf, i)
|
||||
return ToBytes4(buf)
|
||||
}
|
||||
|
||||
// Uint64ToBytesLittleEndian conversion.
|
||||
func Uint64ToBytesLittleEndian(i uint64) []byte {
|
||||
buf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(buf, i)
|
||||
return buf
|
||||
}
|
||||
|
||||
// Uint64ToBytesBigEndian conversion.
|
||||
func Uint64ToBytesBigEndian(i uint64) []byte {
|
||||
buf := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(buf, i)
|
||||
return buf
|
||||
}
|
||||
|
||||
// BytesToUint64BigEndian conversion. Returns 0 if empty bytes or byte slice with length less
|
||||
// than 8.
|
||||
func BytesToUint64BigEndian(b []byte) uint64 {
|
||||
if len(b) < 8 { // This will panic otherwise.
|
||||
return 0
|
||||
}
|
||||
return binary.BigEndian.Uint64(b)
|
||||
}
|
||||
|
||||
// EpochToBytesLittleEndian conversion.
|
||||
func EpochToBytesLittleEndian(i types.Epoch) []byte {
|
||||
return Uint64ToBytesLittleEndian(uint64(i))
|
||||
}
|
||||
|
||||
// EpochToBytesBigEndian conversion.
|
||||
func EpochToBytesBigEndian(i types.Epoch) []byte {
|
||||
return Uint64ToBytesBigEndian(uint64(i))
|
||||
}
|
||||
|
||||
// BytesToEpochBigEndian conversion.
|
||||
func BytesToEpochBigEndian(b []byte) types.Epoch {
|
||||
return types.Epoch(BytesToUint64BigEndian(b))
|
||||
}
|
||||
|
||||
// SlotToBytesBigEndian conversion.
|
||||
func SlotToBytesBigEndian(i types.Slot) []byte {
|
||||
return Uint64ToBytesBigEndian(uint64(i))
|
||||
}
|
||||
|
||||
// BytesToSlotBigEndian conversion.
|
||||
func BytesToSlotBigEndian(b []byte) types.Slot {
|
||||
return types.Slot(BytesToUint64BigEndian(b))
|
||||
}
|
||||
|
||||
// IsHex checks whether the byte array is a hex number prefixed with '0x'.
|
||||
func IsHex(b []byte) bool {
|
||||
if b == nil {
|
||||
return false
|
||||
}
|
||||
return hexRegex.Match(b)
|
||||
}
|
||||
|
||||
// ReverseByteOrder Switch the endianness of a byte slice by reversing its order.
|
||||
// this function does not modify the actual input bytes.
|
||||
func ReverseByteOrder(input []byte) []byte {
|
||||
@@ -433,30 +127,3 @@ func ReverseByteOrder(input []byte) []byte {
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// ZeroRoot returns whether or not a root is of proper length and non-zero hash.
|
||||
func ZeroRoot(root []byte) bool {
|
||||
return string(make([]byte, fieldparams.RootLength)) == string(root)
|
||||
}
|
||||
|
||||
// IsRoot checks whether the byte array is a root.
|
||||
func IsRoot(root []byte) bool {
|
||||
return len(root) == fieldparams.RootLength
|
||||
}
|
||||
|
||||
// IsValidRoot checks whether the byte array is a valid root.
|
||||
func IsValidRoot(root []byte) bool {
|
||||
return IsRoot(root) && !ZeroRoot(root)
|
||||
}
|
||||
|
||||
// LittleEndianBytesToBigInt takes bytes of a number stored as little-endian and returns a big integer
|
||||
func LittleEndianBytesToBigInt(bytes []byte) *big.Int {
|
||||
// Integers are stored as little-endian, but big.Int expects big-endian. So we need to reverse the byte order before decoding.
|
||||
return new(big.Int).SetBytes(ReverseByteOrder(bytes))
|
||||
}
|
||||
|
||||
// BigIntToLittleEndianBytes takes a big integer and returns its bytes stored as little-endian
|
||||
func BigIntToLittleEndianBytes(bigInt *big.Int) []byte {
|
||||
// big.Int.Bytes() returns bytes in big-endian order, so we need to reverse the byte order
|
||||
return ReverseByteOrder(bigInt.Bytes())
|
||||
}
|
||||
|
||||
48 encoding/bytesutil/bytes_go120.go Normal file
@@ -0,0 +1,48 @@
//go:build go1.20
// +build go1.20

package bytesutil

// These methods use go1.20 syntax to convert a byte slice to a fixed size array.

// ToBytes4 is a convenience method for converting a byte slice to a fix
// sized 4 byte array. This method will truncate the input if it is larger
// than 4 bytes.
func ToBytes4(x []byte) [4]byte {
	return [4]byte(PadTo(x, 4))
}

// ToBytes20 is a convenience method for converting a byte slice to a fix
// sized 20 byte array. This method will truncate the input if it is larger
// than 20 bytes.
func ToBytes20(x []byte) [20]byte {
	return [20]byte(PadTo(x, 20))
}

// ToBytes32 is a convenience method for converting a byte slice to a fix
// sized 32 byte array. This method will truncate the input if it is larger
// than 32 bytes.
func ToBytes32(x []byte) [32]byte {
	return [32]byte(PadTo(x, 32))
}

// ToBytes48 is a convenience method for converting a byte slice to a fix
// sized 48 byte array. This method will truncate the input if it is larger
// than 48 bytes.
func ToBytes48(x []byte) [48]byte {
	return [48]byte(PadTo(x, 48))
}

// ToBytes64 is a convenience method for converting a byte slice to a fix
// sized 64 byte array. This method will truncate the input if it is larger
// than 64 bytes.
func ToBytes64(x []byte) [64]byte {
	return [64]byte(PadTo(x, 64))
}

// ToBytes96 is a convenience method for converting a byte slice to a fix
// sized 96 byte array. This method will truncate the input if it is larger
// than 96 bytes.
func ToBytes96(x []byte) [96]byte {
	return [96]byte(PadTo(x, 96))
}
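A standalone sketch (not part of the diff) of the go1.20 slice-to-array conversion these helpers rely on, and of why they first pass the input through PadTo: converting a slice shorter than the target array panics at runtime, while a longer slice is silently truncated to the array length. PadTo zero-pads short inputs, so the conversion is always safe.

//go:build go1.20

package main

import "fmt"

func main() {
	s := []byte{0xde, 0xad, 0xbe, 0xef, 0xff}

	// go1.20 conversion: the slice must have at least as many elements as the
	// array type or the conversion panics; extra bytes are simply dropped.
	a := [4]byte(s)
	fmt.Printf("%x\n", a) // deadbeef

	// Pre-1.20 equivalent, as in bytes_legacy.go: copy truncates or leaves
	// the remaining array bytes zeroed.
	var b [4]byte
	copy(b[:], s)
	fmt.Printf("%x\n", b) // deadbeef
}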
61 encoding/bytesutil/bytes_legacy.go Normal file
@@ -0,0 +1,61 @@
//go:build !go1.20
// +build !go1.20

package bytesutil

// These methods use copy() to convert a byte slice to a fixed size array.
// This approach is used for go1.19 and below.

// ToBytes4 is a convenience method for converting a byte slice to a fix
// sized 4 byte array. This method will truncate the input if it is larger
// than 4 bytes.
func ToBytes4(x []byte) [4]byte {
	var y [4]byte
	copy(y[:], x)
	return y
}

// ToBytes20 is a convenience method for converting a byte slice to a fix
// sized 20 byte array. This method will truncate the input if it is larger
// than 20 bytes.
func ToBytes20(x []byte) [20]byte {
	var y [20]byte
	copy(y[:], x)
	return y
}

// ToBytes32 is a convenience method for converting a byte slice to a fix
// sized 32 byte array. This method will truncate the input if it is larger
// than 32 bytes.
func ToBytes32(x []byte) [32]byte {
	var y [32]byte
	copy(y[:], x)
	return y
}

// ToBytes48 is a convenience method for converting a byte slice to a fix
// sized 48 byte array. This method will truncate the input if it is larger
// than 48 bytes.
func ToBytes48(x []byte) [48]byte {
	var y [48]byte
	copy(y[:], x)
	return y
}

// ToBytes64 is a convenience method for converting a byte slice to a fix
// sized 64 byte array. This method will truncate the input if it is larger
// than 64 bytes.
func ToBytes64(x []byte) [64]byte {
	var y [64]byte
	copy(y[:], x)
	return y
}

// ToBytes96 is a convenience method for converting a byte slice to a fix
// sized 96 byte array. This method will truncate the input if it is larger
// than 96 bytes.
func ToBytes96(x []byte) [96]byte {
	var y [96]byte
	copy(y[:], x)
	return y
}
@@ -2,213 +2,13 @@ package bytesutil_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
)
|
||||
|
||||
func TestToBytes(t *testing.T) {
|
||||
tests := []struct {
|
||||
a uint64
|
||||
b []byte
|
||||
}{
|
||||
{0, []byte{0}},
|
||||
{1, []byte{1}},
|
||||
{2, []byte{2}},
|
||||
{253, []byte{253}},
|
||||
{254, []byte{254}},
|
||||
{255, []byte{255}},
|
||||
{0, []byte{0, 0}},
|
||||
{1, []byte{1, 0}},
|
||||
{255, []byte{255, 0}},
|
||||
{256, []byte{0, 1}},
|
||||
{65534, []byte{254, 255}},
|
||||
{65535, []byte{255, 255}},
|
||||
{0, []byte{0, 0, 0}},
|
||||
{255, []byte{255, 0, 0}},
|
||||
{256, []byte{0, 1, 0}},
|
||||
{65535, []byte{255, 255, 0}},
|
||||
{65536, []byte{0, 0, 1}},
|
||||
{16777215, []byte{255, 255, 255}},
|
||||
{0, []byte{0, 0, 0, 0}},
|
||||
{256, []byte{0, 1, 0, 0}},
|
||||
{65536, []byte{0, 0, 1, 0}},
|
||||
{16777216, []byte{0, 0, 0, 1}},
|
||||
{16777217, []byte{1, 0, 0, 1}},
|
||||
{4294967295, []byte{255, 255, 255, 255}},
|
||||
{0, []byte{0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
{16777216, []byte{0, 0, 0, 1, 0, 0, 0, 0}},
|
||||
{4294967296, []byte{0, 0, 0, 0, 1, 0, 0, 0}},
|
||||
{4294967297, []byte{1, 0, 0, 0, 1, 0, 0, 0}},
|
||||
{9223372036854775806, []byte{254, 255, 255, 255, 255, 255, 255, 127}},
|
||||
{9223372036854775807, []byte{255, 255, 255, 255, 255, 255, 255, 127}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b := bytesutil.ToBytes(tt.a, len(tt.b))
|
||||
assert.DeepEqual(t, tt.b, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBytes1(t *testing.T) {
|
||||
tests := []struct {
|
||||
a uint64
|
||||
b []byte
|
||||
}{
|
||||
{0, []byte{0}},
|
||||
{1, []byte{1}},
|
||||
{2, []byte{2}},
|
||||
{253, []byte{253}},
|
||||
{254, []byte{254}},
|
||||
{255, []byte{255}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b := bytesutil.Bytes1(tt.a)
|
||||
assert.DeepEqual(t, tt.b, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBytes2(t *testing.T) {
|
||||
tests := []struct {
|
||||
a uint64
|
||||
b []byte
|
||||
}{
|
||||
{0, []byte{0, 0}},
|
||||
{1, []byte{1, 0}},
|
||||
{255, []byte{255, 0}},
|
||||
{256, []byte{0, 1}},
|
||||
{65534, []byte{254, 255}},
|
||||
{65535, []byte{255, 255}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b := bytesutil.Bytes2(tt.a)
|
||||
assert.DeepEqual(t, tt.b, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBytes3(t *testing.T) {
|
||||
tests := []struct {
|
||||
a uint64
|
||||
b []byte
|
||||
}{
|
||||
{0, []byte{0, 0, 0}},
|
||||
{255, []byte{255, 0, 0}},
|
||||
{256, []byte{0, 1, 0}},
|
||||
{65535, []byte{255, 255, 0}},
|
||||
{65536, []byte{0, 0, 1}},
|
||||
{16777215, []byte{255, 255, 255}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b := bytesutil.Bytes3(tt.a)
|
||||
assert.DeepEqual(t, tt.b, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBytes4(t *testing.T) {
|
||||
tests := []struct {
|
||||
a uint64
|
||||
b []byte
|
||||
}{
|
||||
{0, []byte{0, 0, 0, 0}},
|
||||
{256, []byte{0, 1, 0, 0}},
|
||||
{65536, []byte{0, 0, 1, 0}},
|
||||
{16777216, []byte{0, 0, 0, 1}},
|
||||
{16777217, []byte{1, 0, 0, 1}},
|
||||
{4294967295, []byte{255, 255, 255, 255}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b := bytesutil.Bytes4(tt.a)
|
||||
assert.DeepEqual(t, tt.b, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBytes8(t *testing.T) {
|
||||
tests := []struct {
|
||||
a uint64
|
||||
b []byte
|
||||
}{
|
||||
{0, []byte{0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
{16777216, []byte{0, 0, 0, 1, 0, 0, 0, 0}},
|
||||
{4294967296, []byte{0, 0, 0, 0, 1, 0, 0, 0}},
|
||||
{4294967297, []byte{1, 0, 0, 0, 1, 0, 0, 0}},
|
||||
{9223372036854775806, []byte{254, 255, 255, 255, 255, 255, 255, 127}},
|
||||
{9223372036854775807, []byte{255, 255, 255, 255, 255, 255, 255, 127}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b := bytesutil.Bytes8(tt.a)
|
||||
assert.DeepEqual(t, tt.b, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromBool(t *testing.T) {
|
||||
tests := []byte{
|
||||
0,
|
||||
1,
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b := bytesutil.ToBool(tt)
|
||||
c := bytesutil.FromBool(b)
|
||||
assert.Equal(t, tt, c)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromBytes2(t *testing.T) {
|
||||
tests := []uint64{
|
||||
0,
|
||||
1776,
|
||||
96726,
|
||||
(1 << 16) - 1,
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b := bytesutil.ToBytes(tt, 2)
|
||||
c := bytesutil.FromBytes2(b)
|
||||
assert.Equal(t, uint16(tt), c)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromBytes4(t *testing.T) {
|
||||
tests := []uint64{
|
||||
0,
|
||||
1776,
|
||||
96726,
|
||||
4290997,
|
||||
4294967295, // 2^32 - 1
|
||||
4294967200,
|
||||
3894948296,
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b := bytesutil.ToBytes(tt, 4)
|
||||
c := bytesutil.FromBytes4(b)
|
||||
if c != tt {
|
||||
t.Errorf("Wanted %d but got %d", tt, c)
|
||||
}
|
||||
assert.Equal(t, tt, c)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromBytes8(t *testing.T) {
|
||||
tests := []uint64{
|
||||
0,
|
||||
1776,
|
||||
96726,
|
||||
4290997,
|
||||
922376854775806,
|
||||
42893720984775807,
|
||||
18446744073709551615,
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b := bytesutil.ToBytes(tt, 8)
|
||||
c := bytesutil.FromBytes8(b)
|
||||
assert.Equal(t, tt, c)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTruncate(t *testing.T) {
|
||||
tests := []struct {
|
||||
a []byte
|
||||
@@ -243,166 +43,6 @@ func TestReverse(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetBit(t *testing.T) {
|
||||
tests := []struct {
|
||||
a []byte
|
||||
b int
|
||||
c []byte
|
||||
}{
|
||||
{[]byte{0b00000000}, 1, []byte{0b00000010}},
|
||||
{[]byte{0b00000010}, 7, []byte{0b10000010}},
|
||||
{[]byte{0b10000010}, 9, []byte{0b10000010, 0b00000010}},
|
||||
{[]byte{0b10000010}, 27, []byte{0b10000010, 0b00000000, 0b00000000, 0b00001000}},
|
||||
{[]byte{0b10000010, 0b00000000}, 8, []byte{0b10000010, 0b00000001}},
|
||||
{[]byte{0b10000010, 0b00000000}, 31, []byte{0b10000010, 0b00000000, 0b00000000, 0b10000000}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
assert.DeepEqual(t, tt.c, bytesutil.SetBit(tt.a, tt.b))
|
||||
}
|
||||
}
|
||||
|
||||
func TestClearBit(t *testing.T) {
|
||||
tests := []struct {
|
||||
a []byte
|
||||
b int
|
||||
c []byte
|
||||
}{
|
||||
{[]byte{0b00000000}, 1, []byte{0b00000000}},
|
||||
{[]byte{0b00000010}, 1, []byte{0b00000000}},
|
||||
{[]byte{0b10000010}, 1, []byte{0b10000000}},
|
||||
{[]byte{0b10000010}, 8, []byte{0b10000010}},
|
||||
{[]byte{0b10000010, 0b00001111}, 7, []byte{0b00000010, 0b00001111}},
|
||||
{[]byte{0b10000010, 0b00001111}, 10, []byte{0b10000010, 0b00001011}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
assert.DeepEqual(t, tt.c, bytesutil.ClearBit(tt.a, tt.b))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMakeEmptyBitfields(t *testing.T) {
|
||||
tests := []struct {
|
||||
a int
|
||||
b int
|
||||
}{
|
||||
{0, 1},
|
||||
{1, 1},
|
||||
{2, 1},
|
||||
{7, 1},
|
||||
{8, 2},
|
||||
{15, 2},
|
||||
{16, 3},
|
||||
{100, 13},
|
||||
{104, 14},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
assert.DeepEqual(t, tt.b, len(bytesutil.MakeEmptyBitlists(tt.a)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHighestBitIndex(t *testing.T) {
|
||||
tests := []struct {
|
||||
a []byte
|
||||
b int
|
||||
error bool
|
||||
}{
|
||||
{nil, 0, true},
|
||||
{[]byte{}, 0, true},
|
||||
{[]byte{0b00000001}, 1, false},
|
||||
{[]byte{0b10100101}, 8, false},
|
||||
{[]byte{0x00, 0x00}, 0, false},
|
||||
{[]byte{0xff, 0xa0}, 16, false},
|
||||
{[]byte{12, 34, 56, 78}, 31, false},
|
||||
{[]byte{255, 255, 255, 255}, 32, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
i, err := bytesutil.HighestBitIndex(tt.a)
|
||||
if !tt.error {
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, tt.b, i)
|
||||
} else {
|
||||
assert.ErrorContains(t, "input list can't be empty or nil", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHighestBitIndexBelow(t *testing.T) {
|
||||
tests := []struct {
|
||||
a []byte
|
||||
b int
|
||||
c int
|
||||
error bool
|
||||
}{
|
||||
{nil, 0, 0, true},
|
||||
{[]byte{}, 0, 0, true},
|
||||
{[]byte{0b00010001}, 0, 0, false},
|
||||
{[]byte{0b00010001}, 1, 1, false},
|
||||
{[]byte{0b00010001}, 2, 1, false},
|
||||
{[]byte{0b00010001}, 4, 1, false},
|
||||
{[]byte{0b00010001}, 5, 5, false},
|
||||
{[]byte{0b00010001}, 8, 5, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 0, 0, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 1, 1, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 2, 1, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 4, 1, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 5, 5, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 8, 5, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 15, 5, false},
|
||||
{[]byte{0b00010001, 0b00000000}, 16, 5, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 8, 5, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 9, 5, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 10, 10, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 11, 10, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 14, 14, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 15, 14, false},
|
||||
{[]byte{0b00010001, 0b00100010}, 24, 14, false},
|
||||
{[]byte{0b00010001, 0b00100010, 0b10000000}, 23, 14, false},
|
||||
{[]byte{0b00010001, 0b00100010, 0b10000000}, 24, 24, false},
|
||||
{[]byte{0b00000000, 0b00000001, 0b00000011}, 17, 17, false},
|
||||
{[]byte{0b00000000, 0b00000001, 0b00000011}, 18, 18, false},
|
||||
{[]byte{12, 34, 56, 78}, 1000, 31, false},
|
||||
{[]byte{255, 255, 255, 255}, 1000, 32, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
i, err := bytesutil.HighestBitIndexAt(tt.a, tt.b)
|
||||
if !tt.error {
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, tt.c, i)
|
||||
} else {
|
||||
assert.ErrorContains(t, "input list can't be empty or nil", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUint64ToBytes_RoundTrip(t *testing.T) {
|
||||
for i := uint64(0); i < 10000; i++ {
|
||||
b := bytesutil.Uint64ToBytesBigEndian(i)
|
||||
if got := bytesutil.BytesToUint64BigEndian(b); got != i {
|
||||
t.Error("Round trip did not match original value")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsHex(t *testing.T) {
|
||||
tests := []struct {
|
||||
a []byte
|
||||
b bool
|
||||
}{
|
||||
{nil, false},
|
||||
{[]byte(""), false},
|
||||
{[]byte("0x"), false},
|
||||
{[]byte("0x0"), true},
|
||||
{[]byte("foo"), false},
|
||||
{[]byte("1234567890abcDEF"), false},
|
||||
{[]byte("XYZ4567890abcDEF1234567890abcDEF1234567890abcDEF1234567890abcDEF"), false},
|
||||
{[]byte("0x1234567890abcDEF1234567890abcDEF1234567890abcDEF1234567890abcDEF"), true},
|
||||
{[]byte("1234567890abcDEF1234567890abcDEF1234567890abcDEF1234567890abcDEF"), false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
isHex := bytesutil.IsHex(tt.a)
|
||||
assert.Equal(t, tt.b, isHex)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSafeCopyRootAtIndex(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -521,94 +161,6 @@ func TestSafeCopy2d32Bytes(t *testing.T) {
|
||||
assert.DeepEqual(t, input, output)
|
||||
}
|
||||
|
||||
func TestZeroRoot(t *testing.T) {
|
||||
input := make([]byte, fieldparams.RootLength)
|
||||
output := bytesutil.ZeroRoot(input)
|
||||
assert.Equal(t, true, output)
|
||||
copy(input[2:], "a")
|
||||
copy(input[3:], "b")
|
||||
output = bytesutil.ZeroRoot(input)
|
||||
assert.Equal(t, false, output)
|
||||
}
|
||||
|
||||
func TestIsRoot(t *testing.T) {
|
||||
input := make([]byte, fieldparams.RootLength)
|
||||
output := bytesutil.IsRoot(input)
|
||||
assert.Equal(t, true, output)
|
||||
}
|
||||
|
||||
func TestIsValidRoot(t *testing.T) {
|
||||
|
||||
zeroRoot := make([]byte, fieldparams.RootLength)
|
||||
|
||||
validRoot := make([]byte, fieldparams.RootLength)
|
||||
validRoot[0] = 'a'
|
||||
|
||||
wrongLengthRoot := make([]byte, fieldparams.RootLength-4)
|
||||
wrongLengthRoot[0] = 'a'
|
||||
|
||||
type args struct {
|
||||
root []byte
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Is ZeroRoot",
|
||||
args: args{
|
||||
root: zeroRoot,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Is ValidRoot",
|
||||
args: args{
|
||||
root: validRoot,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Is NonZeroRoot but not length 32",
|
||||
args: args{
|
||||
root: wrongLengthRoot,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := bytesutil.IsValidRoot(tt.args.root)
|
||||
require.Equal(t, got, tt.want)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUint32ToBytes4(t *testing.T) {
|
||||
tests := []struct {
|
||||
value uint32
|
||||
want [4]byte
|
||||
}{
|
||||
{
|
||||
value: 0x01000000,
|
||||
want: [4]byte{1, 0, 0, 0},
|
||||
},
|
||||
{
|
||||
value: 0x00000001,
|
||||
want: [4]byte{0, 0, 0, 1},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(fmt.Sprintf("0x%08x", tt.value), func(t *testing.T) {
|
||||
if got := bytesutil.Uint32ToBytes4(tt.value); !bytes.Equal(got[:], tt.want[:]) {
|
||||
t.Errorf("Uint32ToBytes4() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestToBytes48Array(t *testing.T) {
|
||||
tests := []struct {
|
||||
a [][]byte
|
||||
@@ -655,18 +207,44 @@ func TestToBytes20(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLittleEndianBytesToBigInt(t *testing.T) {
|
||||
bytes := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(bytes, 1234567890)
|
||||
converted := bytesutil.LittleEndianBytesToBigInt(bytes)
|
||||
expected := new(big.Int).SetInt64(1234567890)
|
||||
assert.DeepEqual(t, expected, converted)
|
||||
func BenchmarkToBytes32(b *testing.B) {
|
||||
x := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
|
||||
for i := 0; i < b.N; i++ {
|
||||
bytesutil.ToBytes32(x)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBigIntToLittleEndianBytes(t *testing.T) {
|
||||
expected := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(expected, 1234567890)
|
||||
bigInt := new(big.Int).SetUint64(1234567890)
|
||||
converted := bytesutil.BigIntToLittleEndianBytes(bigInt)
|
||||
assert.DeepEqual(t, expected, converted)
|
||||
func TestFromBytes48Array(t *testing.T) {
|
||||
tests := []struct {
|
||||
a [][]byte
|
||||
b [][48]byte
|
||||
}{
|
||||
{[][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
[][48]byte{{0}}},
|
||||
{[][]byte{{253, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
[][48]byte{{253}}},
|
||||
{[][]byte{{254, 255, 255, 255, 255, 255, 255, 127, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
[][48]byte{{254, 255, 255, 255, 255, 255, 255, 127}}},
|
||||
{[][]byte{{255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255}},
|
||||
[][48]byte{{255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255}},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
a := bytesutil.FromBytes48Array(tt.b)
|
||||
assert.DeepEqual(t, tt.a, a)
|
||||
}
|
||||
}
|
||||
|
||||
46 encoding/bytesutil/eth_types.go Normal file
@@ -0,0 +1,46 @@
package bytesutil

import (
	fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
)

// EpochToBytesLittleEndian conversion.
func EpochToBytesLittleEndian(i types.Epoch) []byte {
	return Uint64ToBytesLittleEndian(uint64(i))
}

// EpochToBytesBigEndian conversion.
func EpochToBytesBigEndian(i types.Epoch) []byte {
	return Uint64ToBytesBigEndian(uint64(i))
}

// BytesToEpochBigEndian conversion.
func BytesToEpochBigEndian(b []byte) types.Epoch {
	return types.Epoch(BytesToUint64BigEndian(b))
}

// SlotToBytesBigEndian conversion.
func SlotToBytesBigEndian(i types.Slot) []byte {
	return Uint64ToBytesBigEndian(uint64(i))
}

// BytesToSlotBigEndian conversion.
func BytesToSlotBigEndian(b []byte) types.Slot {
	return types.Slot(BytesToUint64BigEndian(b))
}

// ZeroRoot returns whether or not a root is of proper length and non-zero hash.
func ZeroRoot(root []byte) bool {
	return string(make([]byte, fieldparams.RootLength)) == string(root)
}

// IsRoot checks whether the byte array is a root.
func IsRoot(root []byte) bool {
	return len(root) == fieldparams.RootLength
}

// IsValidRoot checks whether the byte array is a valid root.
func IsValidRoot(root []byte) bool {
	return IsRoot(root) && !ZeroRoot(root)
}
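A brief usage sketch (not part of the diff) for the helpers moved into eth_types.go above. It assumes the prysm v3 module is available with the import paths shown in this file, and that fieldparams.RootLength is the usual 32 bytes.

package main

import (
	"fmt"

	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
)

func main() {
	// Epochs round-trip through their big-endian byte encoding.
	e := types.Epoch(194048)
	b := bytesutil.EpochToBytesBigEndian(e)
	fmt.Println(bytesutil.BytesToEpochBigEndian(b) == e) // true

	// A root must be exactly RootLength bytes and non-zero to be valid.
	root := make([]byte, 32)
	fmt.Println(bytesutil.IsValidRoot(root)) // false: all-zero root
	root[0] = 0xaa
	fmt.Println(bytesutil.IsValidRoot(root)) // true
}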
75 encoding/bytesutil/eth_types_test.go Normal file
@@ -0,0 +1,75 @@
|
||||
package bytesutil_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
)
|
||||
|
||||
func TestZeroRoot(t *testing.T) {
|
||||
input := make([]byte, fieldparams.RootLength)
|
||||
output := bytesutil.ZeroRoot(input)
|
||||
assert.Equal(t, true, output)
|
||||
copy(input[2:], "a")
|
||||
copy(input[3:], "b")
|
||||
output = bytesutil.ZeroRoot(input)
|
||||
assert.Equal(t, false, output)
|
||||
}
|
||||
|
||||
func TestIsRoot(t *testing.T) {
|
||||
input := make([]byte, fieldparams.RootLength)
|
||||
output := bytesutil.IsRoot(input)
|
||||
assert.Equal(t, true, output)
|
||||
}
|
||||
|
||||
func TestIsValidRoot(t *testing.T) {
|
||||
|
||||
zeroRoot := make([]byte, fieldparams.RootLength)
|
||||
|
||||
validRoot := make([]byte, fieldparams.RootLength)
|
||||
validRoot[0] = 'a'
|
||||
|
||||
wrongLengthRoot := make([]byte, fieldparams.RootLength-4)
|
||||
wrongLengthRoot[0] = 'a'
|
||||
|
||||
type args struct {
|
||||
root []byte
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Is ZeroRoot",
|
||||
args: args{
|
||||
root: zeroRoot,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Is ValidRoot",
|
||||
args: args{
|
||||
root: validRoot,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Is NonZeroRoot but not length 32",
|
||||
args: args{
|
||||
root: wrongLengthRoot,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := bytesutil.IsValidRoot(tt.args.root)
|
||||
require.Equal(t, got, tt.want)
|
||||
})
|
||||
}
|
||||
}
|
||||
13 encoding/bytesutil/hex.go Normal file
@@ -0,0 +1,13 @@
package bytesutil

import "regexp"

var hexRegex = regexp.MustCompile("^0x[0-9a-fA-F]+$")

// IsHex checks whether the byte array is a hex number prefixed with '0x'.
func IsHex(b []byte) bool {
	if b == nil {
		return false
	}
	return hexRegex.Match(b)
}
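A tiny usage sketch (not part of the diff) for IsHex, mirroring the cases exercised in the test file earlier in this diff; it assumes the prysm v3 module so the bytesutil import resolves.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
)

func main() {
	// Only 0x-prefixed strings with at least one hex digit match the regex.
	fmt.Println(bytesutil.IsHex([]byte("0xdeadbeef"))) // true
	fmt.Println(bytesutil.IsHex([]byte("0x")))         // false: no digits after the prefix
	fmt.Println(bytesutil.IsHex([]byte("deadbeef")))   // false: missing 0x prefix
	fmt.Println(bytesutil.IsHex(nil))                  // false
}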
Some files were not shown because too many files have changed in this diff.