* save changes

* add dep

* add changes

* add latest changes

* push changes

* hack it for mainnet

* fix deps

* update it

* add changes

* fix e2e

* revert it

* gaz

* remove log

* preston's review

* clear up

* add more logs

* fix nonce gaps

* make it better

* fix blobs

* set value

* add support for deneb scenario paths

* update to fix scenario

* go mod

* clean up

* fix up

* reduce cog complexity

* lint

* remove

* go sec

* Update testing/endtoend/evaluators/fork.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update proto/ssz_proto_library.bzl

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* fix

* radek's review

* make it atomic

* gaz

* add deneb case

* remove deneb activation

* change e2e yaml

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Author: Nishant Das
Date: 2023-11-08 20:24:23 +08:00
Committed by: GitHub
Parent: afaeff9d4c
Commit: 4a515c36e6
22 changed files with 690 additions and 118 deletions

View File

@@ -29,6 +29,6 @@ const (
MaxBlobsPerBlock = 6 // MaxBlobsPerBlock defines the maximum number of blobs that can be included in a block under the consensus rules.
MaxBlobCommitmentsPerBlock = 16 // MaxBlobCommitmentsPerBlock defines the theoretical limit of blobs that can be included in a block.
LogMaxBlobCommitments = 4 // Log_2 of MaxBlobCommitmentsPerBlock
BlobLength = 4 // BlobLength defines the byte length of a blob.
BlobSize = 128 // defined to match blob.size in bazel ssz codegen
BlobLength = 131072 // BlobLength defines the byte length of a blob.
BlobSize = 131072 // defined to match blob.size in bazel ssz codegen
)
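For context, the new value is FIELD_ELEMENTS_PER_BLOB (4096) multiplied by BYTES_PER_FIELD_ELEMENT (32) from EIP-4844. A standalone sketch (not part of this diff) checking the arithmetic:

package main

import "fmt"

func main() {
    const fieldElementsPerBlob = 4096 // EIP-4844
    const bytesPerFieldElement = 32
    fmt.Println(fieldElementsPerBlob * bytesPerFieldElement) // 131072, the new BlobLength/BlobSize
}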

View File

@@ -182,3 +182,21 @@ func SortedForkVersions(forkSchedule map[[4]byte]primitives.Epoch) [][4]byte {
})
return sortedVersions
}
// LastForkEpoch returns the last valid fork epoch that exists in our
// fork schedule.
func LastForkEpoch() primitives.Epoch {
fSchedule := params.BeaconConfig().ForkVersionSchedule
sortedForkVersions := SortedForkVersions(fSchedule)
lastValidEpoch := primitives.Epoch(0)
numOfVersions := len(sortedForkVersions)
for i := numOfVersions - 1; i >= 0; i-- {
v := sortedForkVersions[i]
fEpoch := fSchedule[v]
if fEpoch != math.MaxUint64 {
lastValidEpoch = fEpoch
break
}
}
return lastValidEpoch
}
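LastForkEpoch is what the reworked scenario runner later in this diff keys its epochs off. A minimal standalone usage sketch (the main function is illustrative and assumes the network/forks import path added further down):

package main

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/v4/network/forks"
)

func main() {
    last := forks.LastForkEpoch() // e.g. epoch 10 if Deneb is the last fork scheduled at epoch 10
    // The e2e scenario runner derives its windows relative to this value
    // instead of hard-coding epoch numbers.
    fmt.Printf("freeze window: epochs %d-%d\n", last+1, last+2)
}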

View File

@@ -385,3 +385,101 @@ func TestNextForkData(t *testing.T) {
})
}
}
func TestLastForkEpoch(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
tests := []struct {
name string
setConfg func()
wantedEpoch primitives.Epoch
}{
{
name: "no schedule",
wantedEpoch: 0,
setConfg: func() {
cfg = cfg.Copy()
cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{}
params.OverrideBeaconConfig(cfg)
},
},
{
name: "genesis fork",
wantedEpoch: 0,
setConfg: func() {
cfg = cfg.Copy()
cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
{'A', 'B', 'C', 'D'}: 0,
}
params.OverrideBeaconConfig(cfg)
},
},
{
name: "altair post fork",
wantedEpoch: 10,
setConfg: func() {
cfg = cfg.Copy()
cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
cfg.AltairForkVersion = []byte{'A', 'B', 'C', 'F'}
cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
{'A', 'B', 'C', 'D'}: 0,
{'A', 'B', 'C', 'F'}: 10,
}
params.OverrideBeaconConfig(cfg)
},
},
{
name: "3 forks, 1 valid fork",
wantedEpoch: 5,
setConfg: func() {
cfg = cfg.Copy()
cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
{'A', 'B', 'C', 'D'}: 5,
{'A', 'B', 'C', 'F'}: math.MaxUint64,
{'A', 'B', 'C', 'Z'}: math.MaxUint64,
}
params.OverrideBeaconConfig(cfg)
},
},
{
name: "3 forks, 2 valid ones",
wantedEpoch: 10,
setConfg: func() {
cfg = cfg.Copy()
cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
{'A', 'B', 'C', 'D'}: 0,
{'A', 'B', 'C', 'F'}: 10,
{'A', 'B', 'C', 'Z'}: math.MaxUint64,
}
params.OverrideBeaconConfig(cfg)
},
},
{
name: "3 forks",
wantedEpoch: 100,
setConfg: func() {
cfg = cfg.Copy()
cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
{'A', 'B', 'C', 'D'}: 0,
{'A', 'B', 'C', 'F'}: 10,
{'A', 'B', 'C', 'Z'}: 100,
}
params.OverrideBeaconConfig(cfg)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.setConfg()
fEpoch := LastForkEpoch()
if fEpoch != tt.wantedEpoch {
t.Errorf("LastForkEpoch() fork epoch = %v, want %v", fEpoch, tt.wantedEpoch)
}
})
}
}

View File

@@ -15,7 +15,7 @@ mainnet = {
"previous_epoch_attestations.max": "4096", # MAX_ATTESTATIONS * SLOTS_PER_EPOCH
"current_epoch_attestations.max": "4096", # MAX_ATTESTATIONS * SLOTS_PER_EPOCH
"slashings.size": "8192", # EPOCHS_PER_SLASHINGS_VECTOR
"sync_committee_bits.size": "512", #SYNC_COMMITTEE_SIZE
"sync_committee_bits.size": "512", # SYNC_COMMITTEE_SIZE
"sync_committee_bytes.size": "64",
"sync_committee_bits.type": "github.com/prysmaticlabs/go-bitfield.Bitvector512",
"sync_committee_aggregate_bytes.size": "16",

View File

@@ -74,6 +74,7 @@ common_deps = [
"//testing/slasher/simulator:go_default_library",
"//testing/util:go_default_library",
"//validator/helpers:go_default_library",
"//network/forks:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",

View File

@@ -267,6 +267,8 @@ func (node *BeaconNode) Start(ctx context.Context) error {
fmt.Sprintf("--%s=%s", cmdshared.BootstrapNode.Name, enr),
fmt.Sprintf("--%s=%s", cmdshared.VerbosityFlag.Name, "debug"),
fmt.Sprintf("--%s=%d", flags.BlockBatchLimitBurstFactor.Name, 8),
fmt.Sprintf("--%s=%d", flags.BlobBatchLimitBurstFactor.Name, 8),
fmt.Sprintf("--%s=%d", flags.BlobBatchLimit.Name, 32),
fmt.Sprintf("--%s=%s", cmdshared.ChainConfigFileFlag.Name, cfgPath),
"--" + cmdshared.ValidatorMonitorIndicesFlag.Name + "=1",
"--" + cmdshared.ValidatorMonitorIndicesFlag.Name + "=2",

View File

@@ -15,6 +15,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/testing/endtoend/components/eth1",
visibility = ["//testing/endtoend:__subpackages__"],
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//contracts/deposit:go_default_library",
"//crypto/rand:go_default_library",
@@ -31,8 +32,10 @@ go_library(
"@com_github_ethereum_go_ethereum//accounts/keystore:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
"@com_github_ethereum_go_ethereum//ethclient:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_mariusvanderwijden_fuzzyvm//filler:go_default_library",
"@com_github_mariusvanderwijden_tx_fuzz//:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -3,6 +3,7 @@ package eth1
import (
"context"
"crypto/ecdsa"
"crypto/sha256"
"fmt"
"math/big"
mathRand "math/rand"
@@ -14,8 +15,12 @@ import (
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
"github.com/holiman/uint256"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/crypto/rand"
e2e "github.com/prysmaticlabs/prysm/v4/testing/endtoend/params"
@@ -23,6 +28,8 @@ import (
"golang.org/x/sync/errgroup"
)
var fundedAccount *keystore.Key
type TransactionGenerator struct {
keystore string
seed int64
@@ -46,8 +53,9 @@ func (t *TransactionGenerator) Start(ctx context.Context) error {
defer client.Close()
seed := t.seed
newGen := rand.NewDeterministicGenerator()
if seed == 0 {
seed = rand.NewDeterministicGenerator().Int63()
seed = newGen.Int63()
logrus.Infof("Seed for transaction generator is: %d", seed)
}
// Set seed so that all transactions can be
@@ -62,6 +70,11 @@ func (t *TransactionGenerator) Start(ctx context.Context) error {
if err != nil {
return err
}
newKey := keystore.NewKeyForDirectICAP(newGen)
if err := fundAccount(client, mineKey, newKey); err != nil {
return err
}
fundedAccount = newKey
rnd := make([]byte, 10000)
// #nosec G404
_, err = mathRand.Read(rnd)
@@ -78,10 +91,12 @@ func (t *TransactionGenerator) Start(ctx context.Context) error {
case <-ctx.Done():
return nil
case <-ticker.C:
err := SendTransaction(client, mineKey.PrivateKey, f, gasPrice, mineKey.Address.String(), 100, false)
backend := ethclient.NewClient(client)
err = SendTransaction(client, mineKey.PrivateKey, f, gasPrice, mineKey.Address.String(), 100, backend, false)
if err != nil {
return err
}
backend.Close()
}
}
}
@@ -91,15 +106,13 @@ func (s *TransactionGenerator) Started() <-chan struct{} {
return s.started
}
func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, f *filler.Filler, gasPrice *big.Int, addr string, N uint64, al bool) error {
backend := ethclient.NewClient(client)
func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, f *filler.Filler, gasPrice *big.Int, addr string, N uint64, backend *ethclient.Client, al bool) error {
sender := common.HexToAddress(addr)
chainid, err := backend.ChainID(context.Background())
nonce, err := backend.PendingNonceAt(context.Background(), fundedAccount.Address)
if err != nil {
return err
}
nonce, err := backend.PendingNonceAt(context.Background(), sender)
chainid, err := backend.ChainID(context.Background())
if err != nil {
return err
}
@@ -111,10 +124,55 @@ func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, f *filler.Filler
gasPrice = expectedPrice
}
g, _ := errgroup.WithContext(context.Background())
txs := make([]*types.Transaction, 10)
for i := uint64(0); i < 10; i++ {
index := i
g.Go(func() error {
tx, err := RandomBlobTx(client, f, fundedAccount.Address, nonce+index, gasPrice, chainid, al)
if err != nil {
logrus.WithError(err).Error("Could not create blob tx")
// In the event the transaction constructed is not valid, we continue with the routine
// rather than completely stopping it.
//nolint:nilerr
return nil
}
signedTx, err := types.SignTx(tx, types.NewCancunSigner(chainid), fundedAccount.PrivateKey)
if err != nil {
logrus.WithError(err).Error("Could not sign blob tx")
// We continue on in the event there is a reason we can't sign this
// transaction (unlikely).
//nolint:nilerr
return nil
}
txs[index] = signedTx
return nil
})
}
if err := g.Wait(); err != nil {
return err
}
for _, tx := range txs {
if tx == nil {
continue
}
err = backend.SendTransaction(context.Background(), tx)
if err != nil {
// Do nothing
continue
}
}
nonce, err = backend.PendingNonceAt(context.Background(), sender)
if err != nil {
return err
}
txs = make([]*types.Transaction, N)
for i := uint64(0); i < N; i++ {
index := i
g.Go(func() error {
tx, err := txfuzz.RandomValidTx(client, f, sender, nonce+index, gasPrice, nil, al)
tx, err := txfuzz.RandomValidTx(client, f, sender, nonce+index, gasPrice, chainid, al)
if err != nil {
// In the event the transaction constructed is not valid, we continue with the routine
// rather than completely stopping it.
@@ -128,17 +186,24 @@ func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, f *filler.Filler
//nolint:nilerr
return nil
}
err = backend.SendTransaction(context.Background(), signedTx)
if err != nil {
// We continue on if the constructed transaction is invalid
// and can't be submitted on chain.
//nolint:nilerr
return nil
}
txs[index] = signedTx
return nil
})
}
return g.Wait()
if err := g.Wait(); err != nil {
return err
}
for _, tx := range txs {
if tx == nil {
continue
}
err = backend.SendTransaction(context.Background(), tx)
if err != nil {
// Do nothing
continue
}
}
return nil
}
// Pause pauses the component and its underlying process.
@@ -156,3 +221,231 @@ func (t *TransactionGenerator) Stop() error {
t.cancel()
return nil
}
func RandomBlobTx(rpc *rpc.Client, f *filler.Filler, sender common.Address, nonce uint64, gasPrice, chainID *big.Int, al bool) (*types.Transaction, error) {
// Set fields if non-nil
if rpc != nil {
client := ethclient.NewClient(rpc)
var err error
if gasPrice == nil {
gasPrice, err = client.SuggestGasPrice(context.Background())
if err != nil {
gasPrice = big.NewInt(1)
}
}
if chainID == nil {
chainID, err = client.ChainID(context.Background())
if err != nil {
chainID = big.NewInt(1)
}
}
}
gas := uint64(100000)
to := randomAddress()
code := txfuzz.RandomCode(f)
value := big.NewInt(0)
if len(code) > 128 {
code = code[:128]
}
mod := 2
if al {
mod = 1
}
switch f.Byte() % byte(mod) {
case 0:
// 4844 transaction without AL
tip, feecap, err := getCaps(rpc, gasPrice)
if err != nil {
return nil, err
}
data, err := randomBlobData()
if err != nil {
return nil, err
}
return New4844Tx(nonce, &to, gas, chainID, tip, feecap, value, code, big.NewInt(1000000), data, make(types.AccessList, 0)), nil
case 1:
// 4844 transaction with AL
tx := types.NewTransaction(nonce, to, value, gas, gasPrice, code)
al, err := txfuzz.CreateAccessList(rpc, tx, sender)
if err != nil {
return nil, err
}
tip, feecap, err := getCaps(rpc, gasPrice)
if err != nil {
return nil, err
}
data, err := randomBlobData()
if err != nil {
return nil, err
}
return New4844Tx(nonce, &to, gas, chainID, tip, feecap, value, code, big.NewInt(1000000), data, *al), nil
}
return nil, errors.New("asdf")
}
func New4844Tx(nonce uint64, to *common.Address, gasLimit uint64, chainID, tip, feeCap, value *big.Int, code []byte, blobFeeCap *big.Int, blobData []byte, al types.AccessList) *types.Transaction {
blobs, comms, proofs, versionedHashes, err := EncodeBlobs(blobData)
if err != nil {
panic(err)
}
tx := types.NewTx(&types.BlobTx{
ChainID: uint256.MustFromBig(chainID),
Nonce: nonce,
GasTipCap: uint256.MustFromBig(tip),
GasFeeCap: uint256.MustFromBig(feeCap),
Gas: gasLimit,
To: *to,
Value: uint256.MustFromBig(value),
Data: code,
AccessList: al,
BlobFeeCap: uint256.MustFromBig(blobFeeCap),
BlobHashes: versionedHashes,
Sidecar: &types.BlobTxSidecar{
Blobs: blobs,
Commitments: comms,
Proofs: proofs,
},
})
return tx
}
func encodeBlobs(data []byte) []kzg4844.Blob {
blobs := []kzg4844.Blob{{}}
blobIndex := 0
fieldIndex := -1
numOfElems := fieldparams.BlobLength / 32
for i := 0; i < len(data); i += 31 {
fieldIndex++
if fieldIndex == numOfElems {
if blobIndex >= 1 {
break
}
blobs = append(blobs, kzg4844.Blob{})
blobIndex++
fieldIndex = 0
}
max := i + 31
if max > len(data) {
max = len(data)
}
copy(blobs[blobIndex][fieldIndex*32+1:], data[i:max])
}
return blobs
}
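Each field element is written at offset fieldIndex*32+1, leaving its first byte zero so the 32-byte element stays a canonical BLS12-381 scalar; that gives 31 payload bytes per element, and the blobIndex >= 1 break caps the output at two blobs. A quick capacity check (sketch, not part of the diff):

package main

import "fmt"

func main() {
    const bytesPerFieldElement = 32
    const payloadBytesPerElement = 31                          // first byte of each element stays zero
    const fieldElementsPerBlob = 131072 / bytesPerFieldElement // 4096
    fmt.Println(fieldElementsPerBlob * payloadBytesPerElement) // 126976 usable bytes per blob
}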
func EncodeBlobs(data []byte) ([]kzg4844.Blob, []kzg4844.Commitment, []kzg4844.Proof, []common.Hash, error) {
var (
blobs = encodeBlobs(data)
commits []kzg4844.Commitment
proofs []kzg4844.Proof
versionedHashes []common.Hash
)
for _, blob := range blobs {
commit, err := kzg4844.BlobToCommitment(blob)
if err != nil {
return nil, nil, nil, nil, err
}
commits = append(commits, commit)
proof, err := kzg4844.ComputeBlobProof(blob, commit)
if err != nil {
return nil, nil, nil, nil, err
}
if err := kzg4844.VerifyBlobProof(blob, commit, proof); err != nil {
return nil, nil, nil, nil, err
}
proofs = append(proofs, proof)
versionedHashes = append(versionedHashes, kZGToVersionedHash(commit))
}
return blobs, commits, proofs, versionedHashes, nil
}
var blobCommitmentVersionKZG uint8 = 0x01
// kZGToVersionedHash implements kzg_to_versioned_hash from EIP-4844
func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash {
h := sha256.Sum256(kzg[:])
h[0] = blobCommitmentVersionKZG
return h
}
func randomBlobData() ([]byte, error) {
// #nosec G404
size := mathRand.Intn(fieldparams.BlobSize)
data := make([]byte, size)
// #nosec G404
n, err := mathRand.Read(data)
if err != nil {
return nil, err
}
if n != size {
return nil, fmt.Errorf("could not create random blob data with size %d: %v", size, err)
}
return data, nil
}
func randomAddress() common.Address {
// #nosec G404
switch mathRand.Int31n(5) {
case 0, 1, 2:
b := make([]byte, 20)
// #nosec G404
_, err := mathRand.Read(b)
if err != nil {
panic(err)
}
return common.BytesToAddress(b)
case 3:
return common.Address{}
case 4:
return common.HexToAddress("0xb02A2EdA1b317FBd16760128836B0Ac59B560e9D")
}
return common.Address{}
}
func getCaps(rpc *rpc.Client, defaultGasPrice *big.Int) (*big.Int, *big.Int, error) {
if rpc == nil {
tip := new(big.Int).Mul(big.NewInt(1), big.NewInt(0).SetUint64(params.BeaconConfig().GweiPerEth))
if defaultGasPrice.Cmp(tip) >= 0 {
feeCap := new(big.Int).Sub(defaultGasPrice, tip)
return tip, feeCap, nil
}
return big.NewInt(0), defaultGasPrice, nil
}
client := ethclient.NewClient(rpc)
tip, err := client.SuggestGasTipCap(context.Background())
if err != nil {
return nil, nil, err
}
feeCap, err := client.SuggestGasPrice(context.Background())
return tip, feeCap, err
}
func fundAccount(client *rpc.Client, sourceKey, destKey *keystore.Key) error {
backend := ethclient.NewClient(client)
defer backend.Close()
nonce, err := backend.PendingNonceAt(context.Background(), sourceKey.Address)
if err != nil {
return err
}
chainid, err := backend.ChainID(context.Background())
if err != nil {
return err
}
expectedPrice, err := backend.SuggestGasPrice(context.Background())
if err != nil {
return err
}
val, ok := big.NewInt(0).SetString("10000000000000000000000000", 10)
if !ok {
return errors.New("could not set big int for value")
}
tx := types.NewTransaction(nonce, destKey.Address, val, 100000, expectedPrice, nil)
signedTx, err := types.SignTx(tx, types.NewLondonSigner(chainid), sourceKey.PrivateKey)
if err != nil {
return err
}
return backend.SendTransaction(context.Background(), signedTx)
}
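Taken together, the flow is: fund a throwaway account, build 4844 transactions with random blob payloads in parallel, sign them with the Cancun signer, and only submit them after the errgroup finishes, so a failed construction is skipped rather than aborting the batch. A condensed sketch of a single send, assuming it sits in the same eth1 package so the helpers and imports above are in scope; sendOneBlobTx and the gas/fee values are illustrative, not taken from the e2e config:

func sendOneBlobTx(client *rpc.Client, key *keystore.Key) error {
    backend := ethclient.NewClient(client)
    defer backend.Close()
    ctx := context.Background()
    nonce, err := backend.PendingNonceAt(ctx, key.Address)
    if err != nil {
        return err
    }
    chainID, err := backend.ChainID(ctx)
    if err != nil {
        return err
    }
    tip, feeCap, err := getCaps(client, big.NewInt(1000000000)) // 1 gwei fallback, illustrative
    if err != nil {
        return err
    }
    data, err := randomBlobData()
    if err != nil {
        return err
    }
    to := randomAddress()
    // Arguments: nonce, to, gas limit, chain id, tip, fee cap, value, calldata, blob fee cap, blob payload, access list.
    tx := New4844Tx(nonce, &to, 100000, chainID, tip, feeCap, big.NewInt(0), nil, big.NewInt(1000000), data, make(types.AccessList, 0))
    signed, err := types.SignTx(tx, types.NewCancunSigner(chainID), key.PrivateKey)
    if err != nil {
        return err
    }
    return backend.SendTransaction(ctx, signed)
}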

View File

@@ -14,14 +14,14 @@ import (
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func e2eMinimal(t *testing.T, v int, cfgo ...types.E2EConfigOpt) *testRunner {
func e2eMinimal(t *testing.T, cfg *params.BeaconChainConfig, cfgo ...types.E2EConfigOpt) *testRunner {
params.SetupTestConfigCleanup(t)
require.NoError(t, params.SetActive(types.StartAt(v, params.E2ETestConfig())))
require.NoError(t, params.SetActive(cfg))
require.NoError(t, e2eParams.Init(t, e2eParams.StandardBeaconCount))
// Run for 12 epochs if not in long-running to confirm long-running has no issues.
var err error
epochsToRun := 12
epochsToRun := 14
epochStr, longRunning := os.LookupEnv("E2E_EPOCHS")
if longRunning {
epochsToRun, err = strconv.Atoi(epochStr)
@@ -56,7 +56,7 @@ func e2eMinimal(t *testing.T, v int, cfgo ...types.E2EConfigOpt) *testRunner {
ev.AltairForkTransition,
ev.BellatrixForkTransition,
ev.CapellaForkTransition,
// ev.DenebForkTransition, // TODO(12750): Enable this when geth main branch's engine API support.
ev.DenebForkTransition,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
@@ -103,7 +103,7 @@ func e2eMainnet(t *testing.T, usePrysmSh, useMultiClient bool, cfg *params.Beaco
}
// Run for 10 epochs if not in long-running to confirm long-running has no issues.
var err error
epochsToRun := 12
epochsToRun := 14
epochStr, longRunning := os.LookupEnv("E2E_EPOCHS")
if longRunning {
epochsToRun, err = strconv.Atoi(epochStr)
@@ -132,7 +132,7 @@ func e2eMainnet(t *testing.T, usePrysmSh, useMultiClient bool, cfg *params.Beaco
ev.AltairForkTransition,
ev.BellatrixForkTransition,
ev.CapellaForkTransition,
// ev.DenebForkTransition, // TODO(12750): Enable this when geth main branch's engine API support.
ev.DenebForkTransition,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
@@ -188,7 +188,7 @@ func scenarioEvals() []types.Evaluator {
ev.AltairForkTransition,
ev.BellatrixForkTransition,
ev.CapellaForkTransition,
// ev.DenebForkTransition, // TODO(12750): Enable this when geth main branch's engine API support.
ev.DenebForkTransition,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
@@ -209,7 +209,7 @@ func scenarioEvalsMulti() []types.Evaluator {
ev.AltairForkTransition,
ev.BellatrixForkTransition,
ev.CapellaForkTransition,
// ev.DenebForkTransition, // TODO(12750): Enable this when geth main branch's engine API support.
ev.DenebForkTransition,
ev.APIGatewayV1Alpha1VerifyIntegrity,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,

View File

@@ -7,6 +7,7 @@ package endtoend
import (
"context"
"fmt"
"math"
"math/big"
"os"
"path"
@@ -18,6 +19,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/api/client/beacon"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/io/file"
"github.com/prysmaticlabs/prysm/v4/network/forks"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -561,6 +563,11 @@ func (r *testRunner) scenarioRun() error {
// Blocking, wait period varies depending on number of validators.
r.waitForChainStart()
keypath, err := e2e.TestParams.Paths.MinerKeyPath()
require.NoError(t, err, "error getting miner key path from bazel static files in defaultEndToEndRun")
r.testTxGeneration(ctx, r.comHandler.group, keypath, []e2etypes.ComponentRunner{})
// Create GRPC connection to beacon nodes.
conns, closeConns, err := helpers.NewLocalConnections(ctx, e2e.TestParams.BeaconNodeCount)
require.NoError(t, err, "Cannot create local connections")
@@ -615,20 +622,36 @@ func (r *testRunner) multiScenarioMulticlient(ec *e2etypes.EvaluationContext, ep
Status *enginev1.PayloadStatus `json:"payloadStatus"`
PayloadId *enginev1.PayloadIDBytes `json:"payloadId"`
}
switch epoch {
case 11:
lastForkEpoch := forks.LastForkEpoch()
freezeStartEpoch := lastForkEpoch + 1
freezeEndEpoch := lastForkEpoch + 2
optimisticStartEpoch := lastForkEpoch + 6
optimisticEndEpoch := lastForkEpoch + 7
recoveryEpochStart, recoveryEpochEnd := lastForkEpoch+3, lastForkEpoch+4
secondRecoveryEpochStart, secondRecoveryEpochEnd := lastForkEpoch+8, lastForkEpoch+9
newPayloadMethod := "engine_newPayloadV3"
forkChoiceUpdatedMethod := "engine_forkchoiceUpdatedV3"
// Fallback if deneb is not set.
if params.BeaconConfig().DenebForkEpoch == math.MaxUint64 {
newPayloadMethod = "engine_newPayloadV2"
forkChoiceUpdatedMethod = "engine_forkchoiceUpdatedV2"
}
switch primitives.Epoch(epoch) {
case freezeStartEpoch:
require.NoError(r.t, r.comHandler.beaconNodes.PauseAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.PauseAtIndex(0))
return true
case 12:
case freezeEndEpoch:
require.NoError(r.t, r.comHandler.beaconNodes.ResumeAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.ResumeAtIndex(0))
return true
case 16:
case optimisticStartEpoch:
// Set it for prysm beacon node.
component, err := r.comHandler.eth1Proxy.ComponentAtIndex(0)
require.NoError(r.t, err)
component.(e2etypes.EngineProxy).AddRequestInterceptor("engine_newPayloadV2", func() interface{} {
component.(e2etypes.EngineProxy).AddRequestInterceptor(newPayloadMethod, func() interface{} {
return &enginev1.PayloadStatus{
Status: enginev1.PayloadStatus_SYNCING,
LatestValidHash: make([]byte, 32),
@@ -639,7 +662,7 @@ func (r *testRunner) multiScenarioMulticlient(ec *e2etypes.EvaluationContext, ep
// Set it for lighthouse beacon node.
component, err = r.comHandler.eth1Proxy.ComponentAtIndex(2)
require.NoError(r.t, err)
component.(e2etypes.EngineProxy).AddRequestInterceptor("engine_newPayloadV2", func() interface{} {
component.(e2etypes.EngineProxy).AddRequestInterceptor(newPayloadMethod, func() interface{} {
return &enginev1.PayloadStatus{
Status: enginev1.PayloadStatus_SYNCING,
LatestValidHash: make([]byte, 32),
@@ -648,7 +671,7 @@ func (r *testRunner) multiScenarioMulticlient(ec *e2etypes.EvaluationContext, ep
return true
})
component.(e2etypes.EngineProxy).AddRequestInterceptor("engine_forkchoiceUpdatedV2", func() interface{} {
component.(e2etypes.EngineProxy).AddRequestInterceptor(forkChoiceUpdatedMethod, func() interface{} {
return &ForkchoiceUpdatedResponse{
Status: &enginev1.PayloadStatus{
Status: enginev1.PayloadStatus_SYNCING,
@@ -660,7 +683,7 @@ func (r *testRunner) multiScenarioMulticlient(ec *e2etypes.EvaluationContext, ep
return true
})
return true
case 17:
case optimisticEndEpoch:
evs := []e2etypes.Evaluator{ev.OptimisticSyncEnabled}
r.executeProvidedEvaluators(ec, epoch, []*grpc.ClientConn{conns[0]}, evs)
// Disable Interceptor
@@ -668,20 +691,21 @@ func (r *testRunner) multiScenarioMulticlient(ec *e2etypes.EvaluationContext, ep
require.NoError(r.t, err)
engineProxy, ok := component.(e2etypes.EngineProxy)
require.Equal(r.t, true, ok)
engineProxy.RemoveRequestInterceptor("engine_newPayloadV2")
engineProxy.ReleaseBackedUpRequests("engine_newPayloadV2")
engineProxy.RemoveRequestInterceptor(newPayloadMethod)
engineProxy.ReleaseBackedUpRequests(newPayloadMethod)
// Remove for lighthouse too
component, err = r.comHandler.eth1Proxy.ComponentAtIndex(2)
require.NoError(r.t, err)
engineProxy, ok = component.(e2etypes.EngineProxy)
require.Equal(r.t, true, ok)
engineProxy.RemoveRequestInterceptor("engine_newPayloadV2")
engineProxy.RemoveRequestInterceptor("engine_forkchoiceUpdatedV2")
engineProxy.ReleaseBackedUpRequests("engine_newPayloadV2")
engineProxy.RemoveRequestInterceptor(newPayloadMethod)
engineProxy.RemoveRequestInterceptor(forkChoiceUpdatedMethod)
engineProxy.ReleaseBackedUpRequests(newPayloadMethod)
return true
case 13, 14, 18, 19:
case recoveryEpochStart, recoveryEpochEnd,
secondRecoveryEpochStart, secondRecoveryEpochEnd:
// Allow 2 epochs for the network to finalize again.
return true
}
@@ -715,27 +739,44 @@ func (r *testRunner) eeOffline(_ *e2etypes.EvaluationContext, epoch uint64, _ []
// will test this with our optimistic sync evaluator to ensure everything works
// as expected.
func (r *testRunner) multiScenario(ec *e2etypes.EvaluationContext, epoch uint64, conns []*grpc.ClientConn) bool {
switch epoch {
case 11:
lastForkEpoch := forks.LastForkEpoch()
freezeStartEpoch := lastForkEpoch + 1
freezeEndEpoch := lastForkEpoch + 2
valOfflineStartEpoch := lastForkEpoch + 6
valOfflineEndEpoch := lastForkEpoch + 7
optimisticStartEpoch := lastForkEpoch + 11
optimisticEndEpoch := lastForkEpoch + 12
recoveryEpochStart, recoveryEpochEnd := lastForkEpoch+3, lastForkEpoch+4
secondRecoveryEpochStart, secondRecoveryEpochEnd := lastForkEpoch+8, lastForkEpoch+9
thirdRecoveryEpochStart, thirdRecoveryEpochEnd := lastForkEpoch+13, lastForkEpoch+14
newPayloadMethod := "engine_newPayloadV3"
// Fallback if deneb is not set.
if params.BeaconConfig().DenebForkEpoch == math.MaxUint64 {
newPayloadMethod = "engine_newPayloadV2"
}
switch primitives.Epoch(epoch) {
case freezeStartEpoch:
require.NoError(r.t, r.comHandler.beaconNodes.PauseAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.PauseAtIndex(0))
return true
case 12:
case freezeEndEpoch:
require.NoError(r.t, r.comHandler.beaconNodes.ResumeAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.ResumeAtIndex(0))
return true
case 16:
case valOfflineStartEpoch:
require.NoError(r.t, r.comHandler.validatorNodes.PauseAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.PauseAtIndex(1))
return true
case 17:
case valOfflineEndEpoch:
require.NoError(r.t, r.comHandler.validatorNodes.ResumeAtIndex(0))
require.NoError(r.t, r.comHandler.validatorNodes.ResumeAtIndex(1))
return true
case 21:
case optimisticStartEpoch:
component, err := r.comHandler.eth1Proxy.ComponentAtIndex(0)
require.NoError(r.t, err)
component.(e2etypes.EngineProxy).AddRequestInterceptor("engine_newPayloadV2", func() interface{} {
component.(e2etypes.EngineProxy).AddRequestInterceptor(newPayloadMethod, func() interface{} {
return &enginev1.PayloadStatus{
Status: enginev1.PayloadStatus_SYNCING,
LatestValidHash: make([]byte, 32),
@@ -744,7 +785,7 @@ func (r *testRunner) multiScenario(ec *e2etypes.EvaluationContext, epoch uint64,
return true
})
return true
case 22:
case optimisticEndEpoch:
evs := []e2etypes.Evaluator{ev.OptimisticSyncEnabled}
r.executeProvidedEvaluators(ec, epoch, []*grpc.ClientConn{conns[0]}, evs)
// Disable Interceptor
@@ -752,11 +793,13 @@ func (r *testRunner) multiScenario(ec *e2etypes.EvaluationContext, epoch uint64,
require.NoError(r.t, err)
engineProxy, ok := component.(e2etypes.EngineProxy)
require.Equal(r.t, true, ok)
engineProxy.RemoveRequestInterceptor("engine_newPayloadV2")
engineProxy.ReleaseBackedUpRequests("engine_newPayloadV2")
engineProxy.RemoveRequestInterceptor(newPayloadMethod)
engineProxy.ReleaseBackedUpRequests(newPayloadMethod)
return true
case 13, 14, 18, 19, 23, 24:
case recoveryEpochStart, recoveryEpochEnd,
secondRecoveryEpochStart, secondRecoveryEpochEnd,
thirdRecoveryEpochStart, thirdRecoveryEpochEnd:
// Allow 2 epochs for the network to finalize again.
return true
}
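With the hard-coded epochs replaced, the scenario timeline now floats with the fork schedule. For illustration only, with F = forks.LastForkEpoch():

F+1 to F+2: pause and resume beacon/validator node 0
F+3 to F+4: recovery window, let the network finalize again
F+6 to F+7: validators offline and back online (multiScenario), or the optimistic-sync window (multiScenarioMulticlient)
F+8 to F+9: second recovery window
F+11 to F+12: optimistic-sync window (multiScenario)
F+13 to F+14: third recovery window (multiScenario)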

View File

@@ -62,5 +62,6 @@ go_library(
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_x_exp//rand:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -731,7 +731,7 @@ func postEvaluation(beaconNodeIdx int, requests map[string]metadata) error {
if err := bb.UnmarshalSSZ(blindedBlockSsz); err != nil {
return errors.Wrap(err, "failed to unmarshal ssz")
}
} else {
} else if finalizedEpoch >= helpers.CapellaE2EForkEpoch && finalizedEpoch < helpers.DenebE2EForkEpoch {
b := &ethpb.SignedBeaconBlockCapella{}
if err := b.UnmarshalSSZ(blockSsz); err != nil {
return errors.Wrap(err, "failed to unmarshal ssz")
@@ -740,6 +740,15 @@ func postEvaluation(beaconNodeIdx int, requests map[string]metadata) error {
if err := bb.UnmarshalSSZ(blindedBlockSsz); err != nil {
return errors.Wrap(err, "failed to unmarshal ssz")
}
} else {
b := &ethpb.SignedBeaconBlockDeneb{}
if err := b.UnmarshalSSZ(blockSsz); err != nil {
return errors.Wrap(err, "failed to unmarshal ssz")
}
bb := &ethpb.SignedBlindedBeaconBlockDeneb{}
if err := bb.UnmarshalSSZ(blindedBlockSsz); err != nil {
return errors.Wrap(err, "failed to unmarshal ssz")
}
}
// verify that dependent root of proposer duties matches block header

View File

@@ -44,46 +44,9 @@ func optimisticSyncEnabled(_ *types.EvaluationContext, conns ...*grpc.ClientConn
if err = json.NewDecoder(httpResp.Body).Decode(&resp); err != nil {
return err
}
headSlot := uint64(0)
switch resp.Version {
case version.String(version.Phase0):
b := &shared.BeaconBlock{}
if err := json.Unmarshal(resp.Data.Message, b); err != nil {
return err
}
headSlot, err = strconv.ParseUint(b.Slot, 10, 64)
if err != nil {
return err
}
case version.String(version.Altair):
b := &shared.BeaconBlockAltair{}
if err := json.Unmarshal(resp.Data.Message, b); err != nil {
return err
}
headSlot, err = strconv.ParseUint(b.Slot, 10, 64)
if err != nil {
return err
}
case version.String(version.Bellatrix):
b := &shared.BeaconBlockBellatrix{}
if err := json.Unmarshal(resp.Data.Message, b); err != nil {
return err
}
headSlot, err = strconv.ParseUint(b.Slot, 10, 64)
if err != nil {
return err
}
case version.String(version.Capella):
b := &shared.BeaconBlockCapella{}
if err := json.Unmarshal(resp.Data.Message, b); err != nil {
return err
}
headSlot, err = strconv.ParseUint(b.Slot, 10, 64)
if err != nil {
return err
}
default:
return errors.New("no valid block type retrieved")
headSlot, err := retrieveHeadSlot(&resp)
if err != nil {
return err
}
currEpoch := slots.ToEpoch(primitives.Slot(headSlot))
startSlot, err := slots.EpochStart(currEpoch)
@@ -123,3 +86,58 @@ func optimisticSyncEnabled(_ *types.EvaluationContext, conns ...*grpc.ClientConn
}
return nil
}
func retrieveHeadSlot(resp *beacon.GetBlockV2Response) (uint64, error) {
headSlot := uint64(0)
var err error
switch resp.Version {
case version.String(version.Phase0):
b := &shared.BeaconBlock{}
if err := json.Unmarshal(resp.Data.Message, b); err != nil {
return 0, err
}
headSlot, err = strconv.ParseUint(b.Slot, 10, 64)
if err != nil {
return 0, err
}
case version.String(version.Altair):
b := &shared.BeaconBlockAltair{}
if err := json.Unmarshal(resp.Data.Message, b); err != nil {
return 0, err
}
headSlot, err = strconv.ParseUint(b.Slot, 10, 64)
if err != nil {
return 0, err
}
case version.String(version.Bellatrix):
b := &shared.BeaconBlockBellatrix{}
if err := json.Unmarshal(resp.Data.Message, b); err != nil {
return 0, err
}
headSlot, err = strconv.ParseUint(b.Slot, 10, 64)
if err != nil {
return 0, err
}
case version.String(version.Capella):
b := &shared.BeaconBlockCapella{}
if err := json.Unmarshal(resp.Data.Message, b); err != nil {
return 0, err
}
headSlot, err = strconv.ParseUint(b.Slot, 10, 64)
if err != nil {
return 0, err
}
case version.String(version.Deneb):
b := &shared.BeaconBlockDeneb{}
if err := json.Unmarshal(resp.Data.Message, b); err != nil {
return 0, err
}
headSlot, err = strconv.ParseUint(b.Slot, 10, 64)
if err != nil {
return 0, err
}
default:
return 0, errors.New("no valid block type retrieved")
}
return headSlot, nil
}

View File

@@ -47,6 +47,12 @@ var CapellaForkTransition = types.Evaluator{
Evaluation: capellaForkOccurs,
}
var DenebForkTransition = types.Evaluator{
Name: "deneb_fork_transition_%d",
Policy: policies.OnEpoch(helpers.DenebE2EForkEpoch),
Evaluation: denebForkOccurs,
}
func altairForkOccurs(_ *types.EvaluationContext, conns ...*grpc.ClientConn) error {
conn := conns[0]
client := ethpb.NewBeaconNodeValidatorClient(conn)
@@ -176,3 +182,46 @@ func capellaForkOccurs(_ *types.EvaluationContext, conns ...*grpc.ClientConn) er
}
return nil
}
func denebForkOccurs(_ *types.EvaluationContext, conns ...*grpc.ClientConn) error {
conn := conns[0]
client := ethpb.NewBeaconNodeValidatorClient(conn)
ctx, cancel := context.WithTimeout(context.Background(), streamDeadline)
defer cancel()
stream, err := client.StreamBlocksAltair(ctx, &ethpb.StreamBlocksRequest{VerifiedOnly: true})
if err != nil {
return errors.Wrap(err, "failed to get stream")
}
fSlot, err := slots.EpochStart(helpers.DenebE2EForkEpoch)
if err != nil {
return err
}
if ctx.Err() == context.Canceled {
return errors.New("context canceled prematurely")
}
res, err := stream.Recv()
if err != nil {
return err
}
if res == nil || res.Block == nil {
return errors.New("nil block returned by beacon node")
}
if res.GetBlock() == nil {
return errors.New("nil block returned by beacon node")
}
if res.GetDenebBlock() == nil {
return errors.Errorf("non-deneb block returned after the fork with type %T", res.Block)
}
blk, err := blocks.NewSignedBeaconBlock(res.GetDenebBlock())
if err != nil {
return err
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
return err
}
if blk.Block().Slot() < fSlot {
return errors.Errorf("wanted a block at slot >= %d but received %d", fSlot, blk.Block().Slot())
}
return nil
}

View File

@@ -16,6 +16,7 @@ import (
e2e "github.com/prysmaticlabs/prysm/v4/testing/endtoend/params"
"github.com/prysmaticlabs/prysm/v4/testing/endtoend/policies"
e2etypes "github.com/prysmaticlabs/prysm/v4/testing/endtoend/types"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
)
@@ -132,17 +133,28 @@ func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientCo
justifiedRoots := make([][]byte, len(conns))
prevJustifiedRoots := make([][]byte, len(conns))
finalizedRoots := make([][]byte, len(conns))
chainHeads := make([]*eth.ChainHead, len(conns))
g, _ := errgroup.WithContext(context.Background())
for i, conn := range conns {
beaconClient := eth.NewBeaconChainClient(conn)
chainHead, err := beaconClient.GetChainHead(context.Background(), &emptypb.Empty{})
if err != nil {
return errors.Wrapf(err, "connection number=%d", i)
}
headEpochs[i] = chainHead.HeadEpoch
justifiedRoots[i] = chainHead.JustifiedBlockRoot
prevJustifiedRoots[i] = chainHead.PreviousJustifiedBlockRoot
finalizedRoots[i] = chainHead.FinalizedBlockRoot
time.Sleep(connTimeDelay)
conIdx := i
currConn := conn
g.Go(func() error {
beaconClient := eth.NewBeaconChainClient(currConn)
chainHead, err := beaconClient.GetChainHead(context.Background(), &emptypb.Empty{})
if err != nil {
return errors.Wrapf(err, "connection number=%d", conIdx)
}
headEpochs[conIdx] = chainHead.HeadEpoch
justifiedRoots[conIdx] = chainHead.JustifiedBlockRoot
prevJustifiedRoots[conIdx] = chainHead.PreviousJustifiedBlockRoot
finalizedRoots[conIdx] = chainHead.FinalizedBlockRoot
chainHeads[conIdx] = chainHead
return nil
})
}
if err := g.Wait(); err != nil {
return err
}
for i := 0; i < len(conns); i++ {
@@ -156,10 +168,12 @@ func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientCo
}
if !bytes.Equal(justifiedRoots[0], justifiedRoots[i]) {
return fmt.Errorf(
"received conflicting justified block roots on node %d, expected %#x, received %#x",
"received conflicting justified block roots on node %d, expected %#x, received %#x: %s and %s",
i,
justifiedRoots[0],
justifiedRoots[i],
chainHeads[0].String(),
chainHeads[i].String(),
)
}
if !bytes.Equal(prevJustifiedRoots[0], prevJustifiedRoots[i]) {

View File

@@ -40,6 +40,7 @@ const (
AltairE2EForkEpoch = params.AltairE2EForkEpoch
BellatrixE2EForkEpoch = params.BellatrixE2EForkEpoch
CapellaE2EForkEpoch = params.CapellaE2EForkEpoch
DenebE2EForkEpoch = params.DenebE2EForkEpoch
)
// Graffiti is a list of sample graffiti strings.

View File

@@ -10,10 +10,10 @@ import (
// Run mainnet e2e config with the current release validator against latest beacon node.
func TestEndToEnd_MainnetConfig_ValidatorAtCurrentRelease(t *testing.T) {
r := e2eMainnet(t, true, false, types.StartAt(version.Phase0, params.E2EMainnetTestConfig()))
r := e2eMainnet(t, true, false, types.InitForkCfg(version.Phase0, version.Deneb, params.E2EMainnetTestConfig()))
r.run()
}
func TestEndToEnd_MainnetConfig_MultiClient(t *testing.T) {
e2eMainnet(t, false, true, types.StartAt(version.Phase0, params.E2EMainnetTestConfig()), types.WithValidatorCrossClient()).run()
e2eMainnet(t, false, true, types.InitForkCfg(version.Phase0, version.Capella, params.E2EMainnetTestConfig()), types.WithValidatorCrossClient()).run()
}

View File

@@ -9,7 +9,7 @@ import (
)
func TestEndToEnd_MultiScenarioRun_Multiclient(t *testing.T) {
runner := e2eMainnet(t, false, true, types.StartAt(version.Phase0, params.E2EMainnetTestConfig()), types.WithEpochs(22))
runner := e2eMainnet(t, false, true, types.InitForkCfg(version.Phase0, version.Capella, params.E2EMainnetTestConfig()), types.WithEpochs(24))
runner.config.Evaluators = scenarioEvalsMulti()
runner.config.EvalInterceptor = runner.multiScenarioMulticlient
runner.scenarioRunner()

View File

@@ -3,16 +3,17 @@ package endtoend
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/testing/endtoend/types"
)
func TestEndToEnd_MinimalConfig_WithBuilder(t *testing.T) {
r := e2eMinimal(t, version.Phase0, types.WithCheckpointSync(), types.WithBuilder())
r := e2eMinimal(t, types.InitForkCfg(version.Phase0, version.Capella, params.E2ETestConfig()), types.WithCheckpointSync(), types.WithBuilder())
r.run()
}
func TestEndToEnd_MinimalConfig_WithBuilder_ValidatorRESTApi(t *testing.T) {
r := e2eMinimal(t, version.Phase0, types.WithCheckpointSync(), types.WithBuilder(), types.WithValidatorRESTApi())
r := e2eMinimal(t, types.InitForkCfg(version.Phase0, version.Capella, params.E2ETestConfig()), types.WithCheckpointSync(), types.WithBuilder(), types.WithValidatorRESTApi())
r.run()
}

View File

@@ -3,11 +3,12 @@ package endtoend
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/testing/endtoend/types"
)
func TestEndToEnd_MinimalConfig(t *testing.T) {
r := e2eMinimal(t, version.Phase0, types.WithCheckpointSync())
r := e2eMinimal(t, types.InitForkCfg(version.Phase0, version.Deneb, params.E2ETestConfig()), types.WithCheckpointSync())
r.run()
}

View File

@@ -3,12 +3,13 @@ package endtoend
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/testing/endtoend/types"
)
func TestEndToEnd_MultiScenarioRun(t *testing.T) {
runner := e2eMinimal(t, version.Phase0, types.WithEpochs(24))
runner := e2eMinimal(t, types.InitForkCfg(version.Phase0, version.Deneb, params.E2ETestConfig()), types.WithEpochs(26))
runner.config.Evaluators = scenarioEvals()
runner.config.EvalInterceptor = runner.multiScenario
@@ -16,16 +17,16 @@ func TestEndToEnd_MultiScenarioRun(t *testing.T) {
}
func TestEndToEnd_MinimalConfig_Web3Signer(t *testing.T) {
e2eMinimal(t, version.Phase0, types.WithRemoteSigner()).run()
e2eMinimal(t, types.InitForkCfg(version.Phase0, version.Capella, params.E2ETestConfig()), types.WithRemoteSigner()).run()
}
func TestEndToEnd_MinimalConfig_ValidatorRESTApi(t *testing.T) {
e2eMinimal(t, version.Phase0, types.WithCheckpointSync(), types.WithValidatorRESTApi()).run()
e2eMinimal(t, types.InitForkCfg(version.Phase0, version.Deneb, params.E2ETestConfig()), types.WithCheckpointSync(), types.WithValidatorRESTApi()).run()
}
func TestEndToEnd_ScenarioRun_EEOffline(t *testing.T) {
t.Skip("TODO(#10242) Prysm is current unable to handle an offline e2e")
runner := e2eMinimal(t, version.Phase0)
runner := e2eMinimal(t, types.InitForkCfg(version.Phase0, version.Deneb, params.E2ETestConfig()))
runner.config.Evaluators = scenarioEvals()
runner.config.EvalInterceptor = runner.eeOffline

View File

@@ -2,22 +2,41 @@ package types
import (
"fmt"
"math"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
func StartAt(v int, c *params.BeaconChainConfig) *params.BeaconChainConfig {
func InitForkCfg(start, end int, c *params.BeaconChainConfig) *params.BeaconChainConfig {
c = c.Copy()
if v >= version.Altair {
if end < start {
panic("end fork is less than the start fork")
}
if start >= version.Altair {
c.AltairForkEpoch = 0
}
if v >= version.Bellatrix {
if start >= version.Bellatrix {
c.BellatrixForkEpoch = 0
}
if v >= version.Capella {
if start >= version.Capella {
c.CapellaForkEpoch = 0
}
if start >= version.Deneb {
c.DenebForkEpoch = 0
}
if end < version.Deneb {
c.DenebForkEpoch = math.MaxUint64
}
if end < version.Capella {
c.CapellaForkEpoch = math.MaxUint64
}
if end < version.Bellatrix {
c.BellatrixForkEpoch = math.MaxUint64
}
if end < version.Altair {
c.AltairForkEpoch = math.MaxUint64
}
// Time TTD to line up roughly with the bellatrix fork epoch.
// E2E sets EL block production rate equal to SecondsPerETH1Block to keep the math simple.
ttd := uint64(c.BellatrixForkEpoch) * uint64(c.SlotsPerEpoch) * c.SecondsPerSlot
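For illustration only (hypothetical numbers, not the e2e defaults): with BellatrixForkEpoch = 8, SlotsPerEpoch = 6 and SecondsPerSlot = 10, this works out to ttd = 8 * 6 * 10 = 480.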