Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: pass_justi...fix-intero (17 commits)
3120746e59
ec7949fa4b
cb8eb4e955
800f3b572f
9d3af41acb
07a0a95ee7
9e7352704c
2616de1eb1
b2e3c29ab3
83538251aa
2442280e37
4608569495
20d013a30b
b0a2115a26
102518e106
e49ed4d554
21775eed52
@@ -6,6 +6,7 @@ go_library(
         "checkpoint.go",
         "client.go",
         "doc.go",
+        "log.go",
     ],
     importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon",
     visibility = ["//visibility:public"],
@@ -17,7 +17,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/io/file"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
-	log "github.com/sirupsen/logrus"
+	"github.com/sirupsen/logrus"
 	"golang.org/x/mod/semver"
 )
@@ -74,7 +74,12 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
 	if err != nil {
 		return nil, errors.Wrap(err, "error detecting chain config for finalized state")
 	}
-	log.Printf("detected supported config in remote finalized state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
+	log.WithFields(logrus.Fields{
+		"name": vu.Config.ConfigName,
+		"fork": version.String(vu.Fork),
+	}).Info("Detected supported config in remote finalized state")
+
 	s, err := vu.UnmarshalBeaconState(sb)
 	if err != nil {
 		return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
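The dominant theme of the logging hunks in this commit range is replacing printf-style interpolation with logrus structured fields, so values survive as queryable key/value pairs instead of being baked into the message string. A minimal, self-contained sketch of the pattern (the package name and field values here are illustrative):

package main

import "github.com/sirupsen/logrus"

// A package-scoped entry with a fixed prefix field, matching the
// logger pattern used across the packages touched by this diff.
var log = logrus.WithField("prefix", "example")

func main() {
	cfg, fork := "mainnet", "deneb" // illustrative values

	// Before: values interpolated into the message string.
	log.Printf("detected config, name=%s, fork=%s", cfg, fork)

	// After: values carried as structured fields; the message stays
	// constant, which makes logs easier to filter and aggregate.
	log.WithFields(logrus.Fields{
		"name": cfg,
		"fork": fork,
	}).Info("Detected supported config")
}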
@@ -22,7 +22,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 	"github.com/prysmaticlabs/prysm/v5/network/forks"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
-	log "github.com/sirupsen/logrus"
+	"github.com/sirupsen/logrus"
 )

 const (
@@ -309,7 +309,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
 	}
 	for _, failure := range errorJson.Failures {
 		w := request[failure.Index].Message
-		log.WithFields(log.Fields{
+		log.WithFields(logrus.Fields{
 			"validatorIndex":    w.ValidatorIndex,
 			"withdrawalAddress": w.ToExecutionAddress,
 		}).Error(failure.Message)
@@ -341,9 +341,9 @@ type forkScheduleResponse struct {
 func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
 	ofs := make(forks.OrderedSchedule, 0)
 	for _, d := range fsr.Data {
-		epoch, err := strconv.Atoi(d.Epoch)
+		epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
 		if err != nil {
-			return nil, err
+			return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
 		}
 		vSlice, err := hexutil.Decode(d.CurrentVersion)
 		if err != nil {
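strconv.Atoi parses into a signed, platform-sized int, so it accepts negative epoch strings and cannot represent the upper half of the uint64 range; strconv.ParseUint rejects both problems at the parse site. A small standalone illustration:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// A negative epoch string is accepted by Atoi but rejected by ParseUint.
	if v, err := strconv.Atoi("-3"); err == nil {
		fmt.Println("Atoi accepted:", v) // Atoi accepted: -3
	}
	if _, err := strconv.ParseUint("-3", 10, 64); err != nil {
		fmt.Println("ParseUint rejected:", err)
	}

	// ParseUint covers the full uint64 range; Atoi would fail with a
	// range error on 32-bit platforms for values above math.MaxInt32.
	v, err := strconv.ParseUint("18446744073709551615", 10, 64)
	fmt.Println(v, err) // 18446744073709551615 <nil>
}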
@@ -355,7 +355,7 @@ func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, e
 		version := bytesutil.ToBytes4(vSlice)
 		ofs = append(ofs, forks.ForkScheduleEntry{
 			Version: version,
-			Epoch:   primitives.Epoch(uint64(epoch)),
+			Epoch:   primitives.Epoch(epoch),
 		})
 	}
 	sort.Sort(ofs)
api/client/beacon/log.go (new file, 5 lines)
@@ -0,0 +1,5 @@
+package beacon
+
+import "github.com/sirupsen/logrus"
+
+var log = logrus.WithField("prefix", "beacon")
@@ -97,6 +97,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 	eg, _ := errgroup.WithContext(ctx)
 	var postState state.BeaconState
 	eg.Go(func() error {
+		var err error
 		postState, err = s.validateStateTransition(ctx, preState, blockCopy)
 		if err != nil {
 			return errors.Wrap(err, "failed to validate consensus state transition function")

@@ -105,6 +106,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 	})
 	var isValidPayload bool
 	eg.Go(func() error {
+		var err error
 		isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
 		if err != nil {
 			return errors.Wrap(err, "could not notify the engine of the new payload")
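Each eg.Go closure now declares its own err, so the two concurrent validations write to separate variables instead of racing on a shared one. A runnable sketch of the same shape, with placeholder functions standing in for Prysm's state and payload validators:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func validateState(ctx context.Context) (string, error) { return "post-state", nil }
func validatePayload(ctx context.Context) (bool, error) { return true, nil }

func main() {
	eg, ctx := errgroup.WithContext(context.Background())

	var postState string
	eg.Go(func() error {
		var err error // local err: no data race with the sibling goroutine
		postState, err = validateState(ctx)
		return err
	})

	var isValidPayload bool
	eg.Go(func() error {
		var err error
		isValidPayload, err = validatePayload(ctx)
		return err
	})

	// Wait returns the first non-nil error from either goroutine.
	if err := eg.Wait(); err != nil {
		fmt.Println("validation failed:", err)
		return
	}
	fmt.Println(postState, isValidPayload)
}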
@@ -290,10 +290,18 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
 	fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
 	s.cfg.ForkChoiceStore.Lock()
 	defer s.cfg.ForkChoiceStore.Unlock()
-	if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
-		Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
-		return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
+	if params.BeaconConfig().ConfigName != params.PraterName {
+		if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
+			Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
+			return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
+		}
+	} else {
+		if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
+			Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
+			return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
+		}
 	}

 	if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
 		Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
 		return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
@@ -1189,11 +1189,3 @@ func BenchmarkDepositTree_HashTreeRootOldImplementation(b *testing.B) {
 		require.NoError(b, err)
 	}
 }
-
-func emptyEth1data() *ethpb.Eth1Data {
-	return &ethpb.Eth1Data{
-		DepositRoot:  make([]byte, 32),
-		DepositCount: 0,
-		BlockHash:    make([]byte, 32),
-	}
-}
@@ -15,8 +15,6 @@ import (
 )

 var (
-	// ErrEmptyExecutionBlock occurs when the execution block is nil.
-	ErrEmptyExecutionBlock = errors.New("empty execution block")
 	// ErrInvalidSnapshotRoot occurs when the snapshot root does not match the calculated root.
 	ErrInvalidSnapshotRoot = errors.New("snapshot root is invalid")
 	// ErrInvalidDepositCount occurs when the value for mix in length is 0.
beacon-chain/cache/proposer_indices_type.go (vendored)
@@ -1,15 +1,9 @@
 package cache

 import (
-	"errors"
-
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 )

-// ErrNotProposerIndices will be returned when a cache object is not a pointer to
-// a ProposerIndices struct.
-var ErrNotProposerIndices = errors.New("object is not a proposer indices struct")
-
 // ProposerIndices defines the cached struct for proposer indices.
 type ProposerIndices struct {
 	BlockRoot [32]byte
@@ -24,7 +24,6 @@ go_library(
         "//beacon-chain/core/execution:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/core/time:go_default_library",
-        "//beacon-chain/core/transition/interop:go_default_library",
         "//beacon-chain/core/validators:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/state-native:go_default_library",
@@ -8,7 +8,6 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
 	b "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition/interop"
 	v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
 	field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"

@@ -58,9 +57,6 @@ func ExecuteStateTransitionNoVerifyAnySig(
 	defer span.End()
 	var err error

-	interop.WriteBlockToDisk(signed, false /* Has the block failed */)
-	interop.WriteStateToDisk(st)
-
 	parentRoot := signed.Block().ParentRoot()
 	st, err = ProcessSlotsUsingNextSlotCache(ctx, st, parentRoot[:], signed.Block().Slot())
 	if err != nil {
@@ -40,7 +40,6 @@ go_test(
         "//consensus-types/blocks:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//encoding/bytesutil:go_default_library",
-        "//proto/prysm/v1alpha1:go_default_library",
         "//testing/require:go_default_library",
         "//testing/util:go_default_library",
         "//time/slots:go_default_library",

@@ -13,7 +13,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
-	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/testing/require"
 	"github.com/prysmaticlabs/prysm/v5/testing/util"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -114,16 +113,6 @@ func Test_commitmentsToCheck(t *testing.T) {
 		}
 	}

-func daAlwaysSucceeds(_ [][]byte, _ []*ethpb.BlobSidecar) error {
-	return nil
-}
-
-type mockDA struct {
-	t   *testing.T
-	scs []blocks.ROBlob
-	err error
-}
-
 func TestLazilyPersistent_Missing(t *testing.T) {
 	ctx := context.Background()
 	store := filesystem.NewEphemeralBlobStorage(t)
@@ -19,9 +19,6 @@ var ErrNotFoundState = kv.ErrNotFoundState
 // ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
 var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot

-// ErrNotFoundBackfillBlockRoot wraps ErrNotFound for an error specific to the backfill block root.
-var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
-
 // IsNotFound allows callers to treat errors from a flat-file database, where the file record is missing,
 // as equivalent to db.ErrNotFound.
 func IsNotFound(err error) bool {
@@ -44,7 +44,6 @@ go_test(
         "//proto/prysm/v1alpha1:go_default_library",
         "//testing/require:go_default_library",
         "//testing/util:go_default_library",
-        "@com_github_pkg_errors//:go_default_library",
         "@com_github_prysmaticlabs_fastssz//:go_default_library",
         "@com_github_spf13_afero//:go_default_library",
     ],
@@ -6,9 +6,7 @@ import (
 	"path"
 	"sync"
 	"testing"
-	"time"

-	"github.com/pkg/errors"
 	ssz "github.com/prysmaticlabs/fastssz"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
 	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -129,29 +127,6 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
 }

-// pollUntil polls a condition function until it returns true or a timeout is reached.
-func pollUntil(t *testing.T, fs afero.Fs, expected int) error {
-	var remainingFolders []os.FileInfo
-	var err error
-	// Define the condition function for polling
-	conditionFunc := func() bool {
-		remainingFolders, err = afero.ReadDir(fs, ".")
-		require.NoError(t, err)
-		return len(remainingFolders) == expected
-	}
-
-	startTime := time.Now()
-	for {
-		if conditionFunc() {
-			break // Condition met, exit the loop
-		}
-		if time.Since(startTime) > 30*time.Second {
-			return errors.New("timeout")
-		}
-		time.Sleep(1 * time.Second) // Adjust the sleep interval as needed
-	}
-	require.Equal(t, expected, len(remainingFolders))
-	return nil
-}
-
 func TestBlobIndicesBounds(t *testing.T) {
 	fs, bs, err := NewEphemeralBlobStorageWithFs(t)
@@ -137,7 +137,7 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
 		}
 	}
 	datafile := StoreDatafilePath(dirPath)
-	log.Infof("Opening Bolt DB at %s", datafile)
+	log.WithField("path", datafile).Info("Opening Bolt DB")
 	boltDB, err := bolt.Open(
 		datafile,
 		params.BeaconIoConfig().ReadWritePermissions,
@@ -11,6 +11,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/proto/dbval"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
+	"github.com/sirupsen/logrus"
 )

 // SaveOrigin loads an ssz serialized Block & BeaconState from an io.Reader

@@ -27,7 +28,11 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
 		return fmt.Errorf("config mismatch, beacon node configured to connect to %s, detected state is for %s", params.BeaconConfig().ConfigName, cf.Config.ConfigName)
 	}

-	log.Infof("detected supported config for state & block version, config name=%s, fork name=%s", cf.Config.ConfigName, version.String(cf.Fork))
+	log.WithFields(logrus.Fields{
+		"configName": cf.Config.ConfigName,
+		"forkName":   version.String(cf.Fork),
+	}).Info("Detected supported config for state & block version")

 	state, err := cf.UnmarshalBeaconState(serState)
 	if err != nil {
 		return errors.Wrap(err, "failed to initialize origin state w/ bytes + config+fork")

@@ -57,13 +62,13 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
 		return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync")
 	}

-	log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
+	log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Info("Saving checkpoint block to db")
 	if err := s.SaveBlock(ctx, wblk); err != nil {
 		return errors.Wrap(err, "could not save checkpoint block")
 	}

 	// save state
-	log.Infof("calling SaveState w/ blockRoot=%x", blockRoot)
+	log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Info("Calling SaveState")
 	if err = s.SaveState(ctx, state, blockRoot); err != nil {
 		return errors.Wrap(err, "could not save state")
 	}
@@ -110,7 +110,7 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
 		return nil, errors.Wrap(errBlockTimeTooLate, fmt.Sprintf("(%d > %d)", time, latestBlkTime))
 	}
 	// Initialize a pointer to eth1 chain's history to start our search from.
-	cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
+	cursorNum := new(big.Int).SetUint64(latestBlkHeight)
 	cursorTime := latestBlkTime

 	var numOfBlocks uint64

@@ -156,15 +156,15 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
 		return s.retrieveHeaderInfo(ctx, cursorNum.Uint64())
 	}
 	if cursorTime > time {
-		return s.findMaxTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
+		return s.findMaxTargetEth1Block(ctx, new(big.Int).SetUint64(estimatedBlk), time)
 	}
-	return s.findMinTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
+	return s.findMinTargetEth1Block(ctx, new(big.Int).SetUint64(estimatedBlk), time)
 }

 // Performs a search to find a target eth1 block which is earlier than or equal to the
 // target time. This method is used when head.time > targetTime
 func (s *Service) findMaxTargetEth1Block(ctx context.Context, upperBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
-	for bn := upperBoundBlk; ; bn = big.NewInt(0).Sub(bn, big.NewInt(1)) {
+	for bn := upperBoundBlk; ; bn = new(big.Int).Sub(bn, big.NewInt(1)) {
 		if ctx.Err() != nil {
 			return nil, ctx.Err()
 		}

@@ -181,7 +181,7 @@ func (s *Service) findMaxTargetEth1Block(ctx context.Context, upperBoundBlk *big
 // Performs a search to find a target eth1 block which is just earlier than or equal to the
 // target time. This method is used when head.time < targetTime
 func (s *Service) findMinTargetEth1Block(ctx context.Context, lowerBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
-	for bn := lowerBoundBlk; ; bn = big.NewInt(0).Add(bn, big.NewInt(1)) {
+	for bn := lowerBoundBlk; ; bn = new(big.Int).Add(bn, big.NewInt(1)) {
 		if ctx.Err() != nil {
 			return nil, ctx.Err()
 		}

@@ -201,7 +201,7 @@ func (s *Service) findMinTargetEth1Block(ctx context.Context, lowerBoundBlk *big
 }

 func (s *Service) retrieveHeaderInfo(ctx context.Context, bNum uint64) (*types.HeaderInfo, error) {
-	bn := big.NewInt(0).SetUint64(bNum)
+	bn := new(big.Int).SetUint64(bNum)
 	exists, info, err := s.headerCache.HeaderInfoByHeight(bn)
 	if err != nil {
 		return nil, err
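big.NewInt(0).SetUint64(x) and new(big.Int).SetUint64(x) produce identical values; the new form simply skips initializing the integer to zero before it is immediately overwritten, and is the conventional spelling when the value is set right away. A quick equivalence check:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	const h = uint64(18_000_000) // an illustrative block height

	// Old form: allocate a big.Int holding 0, then overwrite it.
	a := big.NewInt(0).SetUint64(h)

	// New form: allocate a zero-valued big.Int and set it directly.
	b := new(big.Int).SetUint64(h)

	fmt.Println(a.Cmp(b) == 0) // true: the two forms are equivalent
}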
@@ -23,9 +23,6 @@ var (
 	ErrInvalidPayloadAttributes = errors.New("payload attributes are invalid / inconsistent")
 	// ErrUnknownPayloadStatus when the payload status is unknown.
 	ErrUnknownPayloadStatus = errors.New("unknown payload status")
-	// ErrConfigMismatch when the execution node's terminal total difficulty or
-	// terminal block hash received via the API mismatches Prysm's configuration value.
-	ErrConfigMismatch = errors.New("execution client configuration mismatch")
 	// ErrAcceptedSyncingPayloadStatus when the status of the payload is syncing or accepted.
 	ErrAcceptedSyncingPayloadStatus = errors.New("payload status is SYNCING or ACCEPTED")
 	// ErrInvalidPayloadStatus when the status of the payload is invalid.
@@ -298,9 +298,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
 	// Start from the deployment block if our last requested block
 	// is behind it. This is as the deposit logs can only start from the
 	// block of the deployment of the deposit contract.
-	if deploymentBlock > currentBlockNum {
-		currentBlockNum = deploymentBlock
-	}
+	currentBlockNum = max(currentBlockNum, deploymentBlock)
 	// To store all blocks.
 	headersMap := make(map[uint64]*types.HeaderInfo)
 	rawLogCount, err := s.depositContractCaller.GetDepositCount(&bind.CallOpts{})
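Several hunks in this file collapse three-line clamp branches into the generic min and max builtins introduced in Go 1.21, turning a mutate-in-branch pattern into a single expression:

package main

import "fmt"

func main() {
	currentBlockNum, deploymentBlock := uint64(100), uint64(250)

	// Before: clamp via an explicit branch.
	clamped := currentBlockNum
	if deploymentBlock > clamped {
		clamped = deploymentBlock
	}

	// After: Go 1.21+ builtin; same result, one expression.
	fmt.Println(max(currentBlockNum, deploymentBlock) == clamped) // true
	fmt.Println(min(uint64(300), uint64(275)))                    // 275
}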
@@ -384,15 +382,13 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
 	end := currentBlockNum + batchSize
 	// Appropriately bound the request, as we do not
 	// want request blocks beyond the current follow distance.
-	if end > latestFollowHeight {
-		end = latestFollowHeight
-	}
+	end = min(end, latestFollowHeight)
 	query := ethereum.FilterQuery{
 		Addresses: []common.Address{
 			s.cfg.depositContractAddr,
 		},
-		FromBlock: big.NewInt(0).SetUint64(start),
-		ToBlock:   big.NewInt(0).SetUint64(end),
+		FromBlock: new(big.Int).SetUint64(start),
+		ToBlock:   new(big.Int).SetUint64(end),
 	}
 	remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
 	// only change the end block if the remaining logs are below the required log limit.

@@ -400,7 +396,7 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
 	withinLimit := remainingLogs < depositLogRequestLimit
 	aboveFollowHeight := end >= latestFollowHeight
 	if withinLimit && aboveFollowHeight {
-		query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
+		query.ToBlock = new(big.Int).SetUint64(latestFollowHeight)
 		end = latestFollowHeight
 	}
 	logs, err := s.httpLogger.FilterLogs(ctx, query)
@@ -482,11 +478,11 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
 	}
 	for i := s.latestEth1Data.LastRequestedBlock + 1; i <= requestedBlock; i++ {
 		// Cache eth1 block header here.
-		_, err := s.BlockHashByHeight(ctx, big.NewInt(0).SetUint64(i))
+		_, err := s.BlockHashByHeight(ctx, new(big.Int).SetUint64(i))
 		if err != nil {
 			return err
 		}
-		err = s.ProcessETH1Block(ctx, big.NewInt(0).SetUint64(i))
+		err = s.ProcessETH1Block(ctx, new(big.Int).SetUint64(i))
 		if err != nil {
 			return err
 		}
@@ -77,7 +77,7 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
 		if currClient != nil {
 			currClient.Close()
 		}
-		log.Infof("Connected to new endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
+		log.WithField("endpoint", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url)).Info("Connected to new endpoint")
 		return
 	case <-s.ctx.Done():
 		log.Debug("Received cancelled context,closing existing powchain service")
@@ -415,14 +415,11 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*types.Hea
 	requestRange := (endBlock - startBlock) + 1
 	elems := make([]gethRPC.BatchElem, 0, requestRange)
 	headers := make([]*types.HeaderInfo, 0, requestRange)
-	if requestRange == 0 {
-		return headers, nil
-	}
 	for i := startBlock; i <= endBlock; i++ {
 		header := &types.HeaderInfo{}
 		elems = append(elems, gethRPC.BatchElem{
 			Method: "eth_getBlockByNumber",
-			Args:   []interface{}{hexutil.EncodeBig(big.NewInt(0).SetUint64(i)), false},
+			Args:   []interface{}{hexutil.EncodeBig(new(big.Int).SetUint64(i)), false},
 			Result: header,
 			Error:  error(nil),
 		})

@@ -675,9 +672,7 @@ func (s *Service) cacheBlockHeaders(start, end uint64) error {
 			// the allotted limit.
 			endReq -= 1
 		}
-		if endReq > end {
-			endReq = end
-		}
+		endReq = min(endReq, end)
 		// We call batchRequestHeaders for its header caching side-effect, so we don't need the return value.
 		_, err := s.batchRequestHeaders(startReq, endReq)
 		if err != nil {
@@ -56,6 +56,7 @@ type Service struct {
 	started               bool
 	isPreGenesis          bool
 	pingMethod            func(ctx context.Context, id peer.ID) error
+	pingMethodLock        sync.RWMutex
 	cancel                context.CancelFunc
 	cfg                   *Config
 	peers                 *peers.Status

@@ -354,10 +355,14 @@ func (s *Service) MetadataSeq() uint64 {
 // AddPingMethod adds the metadata ping rpc method to the p2p service, so that it can
 // be used to refresh ENR.
 func (s *Service) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error) {
+	s.pingMethodLock.Lock()
 	s.pingMethod = reqFunc
+	s.pingMethodLock.Unlock()
 }

 func (s *Service) pingPeers() {
+	s.pingMethodLock.RLock()
+	defer s.pingMethodLock.RUnlock()
 	if s.pingMethod == nil {
 		return
 	}
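Guarding the pingMethod function field with a sync.RWMutex removes a race between the writer (AddPingMethod) and readers (pingPeers): writes take the exclusive lock, reads share the read lock. A condensed sketch of the same pattern with simplified types:

package main

import (
	"fmt"
	"sync"
)

type service struct {
	mu   sync.RWMutex
	ping func(id int) error
}

// SetPing installs the callback under the write lock.
func (s *service) SetPing(f func(id int) error) {
	s.mu.Lock()
	s.ping = f
	s.mu.Unlock()
}

// PingAll reads the callback under the read lock, so many readers
// can proceed concurrently while writers are excluded.
func (s *service) PingAll(ids []int) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.ping == nil {
		return
	}
	for _, id := range ids {
		_ = s.ping(id)
	}
}

func main() {
	s := &service{}
	s.SetPing(func(id int) error { fmt.Println("ping", id); return nil })
	s.PingAll([]int{1, 2, 3})
}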
@@ -8,11 +8,11 @@ var (
 	ErrInvalidFinalizedRoot = errors.New("invalid finalized root")
 	ErrInvalidSequenceNum   = errors.New("invalid sequence number provided")
 	ErrGeneric              = errors.New("internal service error")
+	ErrInvalidParent        = errors.New("mismatched parent root")
 	ErrRateLimited          = errors.New("rate limited")
 	ErrIODeadline           = errors.New("i/o deadline exceeded")
 	ErrInvalidRequest       = errors.New("invalid range, step or count")
 	ErrBlobLTMinRequest     = errors.New("blob slot < minimum_request_epoch")
 	ErrMaxBlobReqExceeded   = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
 	ErrResourceUnavailable  = errors.New("resource requested unavailable")
 )
@@ -401,7 +401,19 @@ func (s *Service) GetAttestationData(
 	if err != nil {
 		return nil, &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not get target root")}
 	}
-	justifiedCheckpoint := s.FinalizedFetcher.CurrentJustifiedCheckpt()
+
+	headState, err := s.HeadFetcher.HeadState(ctx)
+	if err != nil {
+		return nil, &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not get head state")}
+	}
+	if coreTime.CurrentEpoch(headState) < slots.ToEpoch(req.Slot) { // Ensure justified checkpoint safety by processing head state across the boundary.
+		headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, req.Slot)
+		if err != nil {
+			return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not process slots up to %d: %v", req.Slot, err)}
+		}
+	}
+	justifiedCheckpoint := headState.CurrentJustifiedCheckpoint()

 	if err = s.AttestationCache.Put(&cache.AttestationConsensusData{
 		Slot:     req.Slot,
 		HeadRoot: headRoot,
@@ -156,9 +156,6 @@ func TestGetSpec(t *testing.T) {
 	var dam [4]byte
 	copy(dam[:], []byte{'1', '0', '0', '0'})
 	config.DomainApplicationMask = dam
-	var dbs [4]byte
-	copy(dam[:], []byte{'2', '0', '0', '0'})
-	config.DomainBlobSidecar = dbs

 	params.OverrideBeaconConfig(config)
@@ -173,7 +170,7 @@ func TestGetSpec(t *testing.T) {
 	data, ok := resp.Data.(map[string]interface{})
 	require.Equal(t, true, ok)

-	assert.Equal(t, 130, len(data))
+	assert.Equal(t, 129, len(data))
 	for k, v := range data {
 		switch k {
 		case "CONFIG_NAME":
@@ -838,10 +838,14 @@ func TestGetAttestationData(t *testing.T) {
 	justifiedRoot, err := justifiedBlock.Block.HashTreeRoot()
 	require.NoError(t, err, "Could not get signing root for justified block")
 	slot := 3*params.BeaconConfig().SlotsPerEpoch + 1
+	beaconState, err := util.NewBeaconState()
+	require.NoError(t, err)
+	require.NoError(t, beaconState.SetSlot(slot))
 	justifiedCheckpoint := &ethpbalpha.Checkpoint{
 		Epoch: 2,
 		Root:  justifiedRoot[:],
 	}
+	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(justifiedCheckpoint))
 	offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
 	chain := &mockChain.ChainService{
 		Optimistic: false,

@@ -849,6 +853,7 @@ func TestGetAttestationData(t *testing.T) {
 		Root:                       blockRoot[:],
 		CurrentJustifiedCheckPoint: justifiedCheckpoint,
 		TargetRoot:                 blockRoot,
+		State:                      beaconState,
 	}

 	s := &Server{

@@ -1076,10 +1081,14 @@ func TestGetAttestationData(t *testing.T) {
 	justifiedRoot, err := justifiedBlock.Block.HashTreeRoot()
 	require.NoError(t, err, "Could not get signing root for justified block")

+	beaconState, err := util.NewBeaconState()
+	require.NoError(t, err)
+	require.NoError(t, beaconState.SetSlot(slot))
 	justifiedCheckpt := &ethpbalpha.Checkpoint{
 		Epoch: 0,
 		Root:  justifiedRoot[:],
 	}
+	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(justifiedCheckpt))
 	require.NoError(t, err)
 	offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
 	chain := &mockChain.ChainService{

@@ -1087,6 +1096,7 @@ func TestGetAttestationData(t *testing.T) {
 		Genesis:                    time.Now().Add(time.Duration(-1*offset) * time.Second),
 		CurrentJustifiedCheckPoint: justifiedCheckpt,
 		TargetRoot:                 blockRoot,
+		State:                      beaconState,
 	}

 	s := &Server{

@@ -1163,17 +1173,24 @@ func TestGetAttestationData(t *testing.T) {
 	require.NoError(t, err, "Could not hash beacon block")
 	justifiedBlockRoot, err := justifiedBlock.Block.HashTreeRoot()
 	require.NoError(t, err, "Could not hash justified block")

+	slot := primitives.Slot(10000)
+	beaconState, err := util.NewBeaconState()
+	require.NoError(t, err)
+	require.NoError(t, beaconState.SetSlot(slot))
 	justifiedCheckpt := &ethpbalpha.Checkpoint{
 		Epoch: slots.ToEpoch(1500),
 		Root:  justifiedBlockRoot[:],
 	}
-	slot := primitives.Slot(10000)
+	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(justifiedCheckpt))

 	offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
 	chain := &mockChain.ChainService{
 		Root:                       blockRoot[:],
 		Genesis:                    time.Now().Add(time.Duration(-1*offset) * time.Second),
 		CurrentJustifiedCheckPoint: justifiedCheckpt,
 		TargetRoot:                 blockRoot,
+		State:                      beaconState,
 	}

 	s := &Server{
@@ -49,12 +49,16 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) {
 	justifiedBlockRoot, err := justifiedBlock.Block.HashTreeRoot()
 	require.NoError(t, err, "Could not hash justified block")

+	slot := primitives.Slot(10000)
+	beaconState, err := util.NewBeaconState()
+	require.NoError(t, err)
+	require.NoError(t, beaconState.SetSlot(slot))
 	justifiedCheckpoint := &ethpb.Checkpoint{
 		Epoch: slots.ToEpoch(1500),
 		Root:  justifiedBlockRoot[:],
 	}
 	require.NoError(t, err)
-	slot := primitives.Slot(10000)
+	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(justifiedCheckpoint))

 	offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
 	attesterServer := &Server{
 		SyncChecker: &mockSync.Sync{IsSyncing: false},

@@ -62,7 +66,7 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) {
 		TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
 		CoreService: &core.Service{
 			AttestationCache:      cache.NewAttestationCache(),
-			HeadFetcher:           &mock.ChainService{TargetRoot: blockRoot, Root: blockRoot[:]},
+			HeadFetcher:           &mock.ChainService{TargetRoot: blockRoot, Root: blockRoot[:], State: beaconState},
 			GenesisTimeFetcher:    &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
 			FinalizedFetcher:      &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
 			OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
@@ -102,17 +102,21 @@ func TestGetAttestationData_OK(t *testing.T) {
 	justifiedRoot, err := justifiedBlock.Block.HashTreeRoot()
 	require.NoError(t, err, "Could not get signing root for justified block")
 	slot := 3*params.BeaconConfig().SlotsPerEpoch + 1
+	beaconState, err := util.NewBeaconState()
+	require.NoError(t, err)
+	require.NoError(t, beaconState.SetSlot(slot))
 	justifiedCheckpoint := &ethpb.Checkpoint{
 		Epoch: 2,
 		Root:  justifiedRoot[:],
 	}
+	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(justifiedCheckpoint))
 	offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
 	attesterServer := &Server{
 		SyncChecker:           &mockSync.Sync{IsSyncing: false},
 		OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
 		TimeFetcher:           &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
 		CoreService: &core.Service{
-			HeadFetcher:        &mock.ChainService{TargetRoot: targetRoot, Root: blockRoot[:]},
+			HeadFetcher:        &mock.ChainService{TargetRoot: targetRoot, Root: blockRoot[:], State: beaconState},
 			GenesisTimeFetcher: &mock.ChainService{
 				Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
 			},
@@ -338,10 +342,15 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) {
 	targetRoot, err := targetBlock.Block.HashTreeRoot()
 	require.NoError(t, err, "Could not get signing root for target block")

+	beaconState, err := util.NewBeaconState()
+	require.NoError(t, err)
+	require.NoError(t, beaconState.SetSlot(slot))
 	justifiedCheckpoint := &ethpb.Checkpoint{
 		Epoch: 0,
 		Root:  justifiedRoot[:],
 	}
+	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(justifiedCheckpoint))

 	offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
 	attesterServer := &Server{
 		SyncChecker: &mockSync.Sync{IsSyncing: false},

@@ -350,7 +359,7 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) {
 		CoreService: &core.Service{
 			AttestationCache: cache.NewAttestationCache(),
 			HeadFetcher: &mock.ChainService{
-				TargetRoot: targetRoot, Root: blockRoot[:],
+				TargetRoot: targetRoot, Root: blockRoot[:], State: beaconState,
 			},
 			GenesisTimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
 			FinalizedFetcher:   &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
@@ -19,7 +19,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
 		balances[i] = params.BeaconConfig().MaxEffectiveBalance
 	}
 	base := &ethpb.BeaconStateAltair{
-		Slot:        2,
+		Slot:        66,
 		RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
 		Validators:  validators,

@@ -35,8 +35,8 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
 	active, previous, current, err := state.UnrealizedCheckpointBalances()
 	require.NoError(t, err)
 	require.Equal(t, allActive, active)
-	require.Equal(t, uint64(0), current)
-	require.Equal(t, uint64(0), previous)
+	require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, current)
+	require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, previous)

 	// Add some votes in the last two epochs:
 	base.CurrentEpochParticipation[0] = 0xFF

@@ -57,8 +57,8 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
 	require.NoError(t, err)
 	active, previous, current, err = state.UnrealizedCheckpointBalances()
 	require.NoError(t, err)
-	require.Equal(t, allActive-params.BeaconConfig().MaxEffectiveBalance, active)
-	require.Equal(t, uint64(0), current)
+	require.Equal(t, allActive, active)
+	require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, current)
 	require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, previous)
 }
@@ -27,25 +27,44 @@ func UnrealizedCheckpointBalances(cp, pp []byte, validators ValReader, currentEp
 		if err != nil {
 			return 0, 0, 0, err
 		}
-		active := v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch
-		if active && !v.Slashed {
+		activeCurrent := v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch
+		if activeCurrent {
 			activeBalance, err = math.Add64(activeBalance, v.EffectiveBalance)
 			if err != nil {
 				return 0, 0, 0, err
 			}
-			if ((cp[i] >> targetIdx) & 1) == 1 {
-				currentTarget, err = math.Add64(currentTarget, v.EffectiveBalance)
-				if err != nil {
-					return 0, 0, 0, err
-				}
-			}
-			if ((pp[i] >> targetIdx) & 1) == 1 {
-				prevTarget, err = math.Add64(prevTarget, v.EffectiveBalance)
-				if err != nil {
-					return 0, 0, 0, err
-				}
-			}
 		}
+		if v.Slashed {
+			continue
+		}
+		if activeCurrent && ((cp[i]>>targetIdx)&1) == 1 {
+			currentTarget, err = math.Add64(currentTarget, v.EffectiveBalance)
+			if err != nil {
+				return 0, 0, 0, err
+			}
+		}
+		activePrevious := v.ActivationEpoch < currentEpoch && currentEpoch <= v.ExitEpoch
+		if activePrevious && ((pp[i]>>targetIdx)&1) == 1 {
+			prevTarget, err = math.Add64(prevTarget, v.EffectiveBalance)
+			if err != nil {
+				return 0, 0, 0, err
+			}
+		}
 	}
+	activeBalance, prevTarget, currentTarget = ensureLowerBound(activeBalance, prevTarget, currentTarget)
 	return activeBalance, prevTarget, currentTarget, nil
 }

+func ensureLowerBound(activeCurrEpoch, prevTargetAttested, currTargetAttested uint64) (uint64, uint64, uint64) {
+	ebi := params.BeaconConfig().EffectiveBalanceIncrement
+	if ebi > activeCurrEpoch {
+		activeCurrEpoch = ebi
+	}
+	if ebi > prevTargetAttested {
+		prevTargetAttested = ebi
+	}
+	if ebi > currTargetAttested {
+		currTargetAttested = ebi
+	}
+	return activeCurrEpoch, prevTargetAttested, currTargetAttested
+}
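The new ensureLowerBound helper floors all three sums at EFFECTIVE_BALANCE_INCREMENT, mirroring the consensus spec's get_total_balance floor that protects later justification math from division by zero; the surrounding test updates replace uint64(0) expectations with this floor accordingly. A standalone sketch, assuming the mainnet increment of 1 ETH in Gwei (in Prysm the value comes from params.BeaconConfig().EffectiveBalanceIncrement):

package main

import "fmt"

// Assumed mainnet value: 1 ETH expressed in Gwei.
const effectiveBalanceIncrement = uint64(1_000_000_000)

func ensureLowerBound(active, prevTarget, currTarget uint64) (uint64, uint64, uint64) {
	if effectiveBalanceIncrement > active {
		active = effectiveBalanceIncrement
	}
	if effectiveBalanceIncrement > prevTarget {
		prevTarget = effectiveBalanceIncrement
	}
	if effectiveBalanceIncrement > currTarget {
		currTarget = effectiveBalanceIncrement
	}
	return active, prevTarget, currTarget
}

func main() {
	// With zero attested balance, the floor guarantees non-zero sums,
	// so a later ratio like currTarget/active can never divide by zero.
	a, p, c := ensureLowerBound(0, 0, 0)
	fmt.Println(a, p, c) // 1000000000 1000000000 1000000000
}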
@@ -29,8 +29,8 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
 		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 0)
 		require.NoError(tt, err)
 		require.Equal(tt, expectedActive, active)
-		require.Equal(tt, uint64(0), current)
-		require.Equal(tt, uint64(0), previous)
+		require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, current)
+		require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, previous)
 	})

 	t.Run("bad votes in last two epochs", func(tt *testing.T) {

@@ -39,8 +39,8 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
 		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
 		require.NoError(tt, err)
 		require.Equal(tt, expectedActive, active)
-		require.Equal(tt, uint64(0), current)
-		require.Equal(tt, uint64(0), previous)
+		require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, current)
+		require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, previous)
 	})

 	t.Run("two votes in last epoch", func(tt *testing.T) {

@@ -50,7 +50,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
 		require.NoError(tt, err)
 		require.Equal(tt, expectedActive, active)
 		require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, current)
-		require.Equal(tt, uint64(0), previous)
+		require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, previous)
 	})

 	t.Run("two votes in previous epoch", func(tt *testing.T) {

@@ -59,7 +59,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
 		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
 		require.NoError(tt, err)
 		require.Equal(tt, expectedActive, active)
-		require.Equal(tt, uint64(0), current)
+		require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, current)
 		require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
 	})

@@ -79,7 +79,6 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
 		validators[1].Slashed = true
 		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValSliceReader(validators), 1)
 		require.NoError(tt, err)
-		expectedActive -= params.BeaconConfig().MaxEffectiveBalance
 		require.Equal(tt, expectedActive, active)
 		require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
 		require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)

@@ -119,8 +118,8 @@ func TestState_MVSlice_UnrealizedCheckpointBalances(t *testing.T) {
 		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 0)
 		require.NoError(tt, err)
 		require.Equal(tt, expectedActive, active)
-		require.Equal(tt, uint64(0), current)
-		require.Equal(tt, uint64(0), previous)
+		require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, current)
+		require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, previous)
 	})

 	t.Run("bad votes in last two epochs", func(tt *testing.T) {

@@ -129,8 +128,8 @@ func TestState_MVSlice_UnrealizedCheckpointBalances(t *testing.T) {
 		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
 		require.NoError(tt, err)
 		require.Equal(tt, expectedActive, active)
-		require.Equal(tt, uint64(0), current)
-		require.Equal(tt, uint64(0), previous)
+		require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, current)
+		require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, previous)
 	})

 	t.Run("two votes in last epoch", func(tt *testing.T) {

@@ -140,7 +139,7 @@ func TestState_MVSlice_UnrealizedCheckpointBalances(t *testing.T) {
 		require.NoError(tt, err)
 		require.Equal(tt, expectedActive, active)
 		require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, current)
-		require.Equal(tt, uint64(0), previous)
+		require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, previous)
 	})

 	t.Run("two votes in previous epoch", func(tt *testing.T) {

@@ -149,7 +148,7 @@ func TestState_MVSlice_UnrealizedCheckpointBalances(t *testing.T) {
 		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
 		require.NoError(tt, err)
 		require.Equal(tt, expectedActive, active)
-		require.Equal(tt, uint64(0), current)
+		require.Equal(tt, params.BeaconConfig().EffectiveBalanceIncrement, current)
 		require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
 	})

@@ -169,7 +168,6 @@ func TestState_MVSlice_UnrealizedCheckpointBalances(t *testing.T) {
 		validators[1].Slashed = true
 		active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, NewValMultiValueSliceReader(mv, &testObject{id: 0}), 1)
 		require.NoError(tt, err)
-		expectedActive -= params.BeaconConfig().MaxEffectiveBalance
 		require.Equal(tt, expectedActive, active)
 		require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
 		require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
@@ -243,13 +243,13 @@ func (s *Service) logBatchSyncStatus(genesis time.Time, firstBlk blocks.ROBlock,
 	firstRoot := firstBlk.Root()
 	timeRemaining := time.Duration(float64(slots.Since(genesis)-firstBlk.Block().Slot())/rate) * time.Second
 	log.WithFields(logrus.Fields{
-		"peers":           len(s.cfg.P2P.Peers().Connected()),
-		"blocksPerSecond": fmt.Sprintf("%.1f", rate),
-	}).Infof(
-		"Processing block batch of size %d starting from %s %d/%d - estimated time remaining %s",
-		nBlocks, fmt.Sprintf("0x%s...", hex.EncodeToString(firstRoot[:])[:8]),
-		firstBlk.Block().Slot(), slots.Since(genesis), timeRemaining,
-	)
+		"peers":                           len(s.cfg.P2P.Peers().Connected()),
+		"blocksPerSecond":                 fmt.Sprintf("%.1f", rate),
+		"batchSize":                       nBlocks,
+		"startingFrom":                    fmt.Sprintf("0x%s...", hex.EncodeToString(firstRoot[:])[:8]),
+		"latestProcessedSlot/currentSlot": fmt.Sprintf("%d/%d", firstBlk.Block().Slot(), slots.Since(genesis)),
+		"estimatedTimeRemaining":          timeRemaining,
+	}).Info("Processing blocks")
 }

 // processBlock performs basic checks on incoming block, and triggers receiver function.
@@ -194,7 +194,7 @@ func (s *Service) Start() {
 		}
 		panic(err)
 	}
-	log.Infof("Synced up to slot %d", s.cfg.Chain.HeadSlot())
+	log.WithField("slot", s.cfg.Chain.HeadSlot()).Info("Synced up to")
 	s.markSynced()
 }
@@ -248,7 +248,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
 	}

 	go messageLoop()
-	log.WithField("topic", topic).Info("Subscribed to topic")
+	log.WithField("topic", topic).Info("Subscribed to")
 	return sub
 }

@@ -286,6 +286,11 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
 			return pubsub.ValidationIgnore
 		}
 		b, err := v(ctx, pid, msg)
+		// We do not penalize peers if we are hitting pubsub timeouts
+		// trying to process those messages.
+		if b == pubsub.ValidationReject && ctx.Err() != nil {
+			b = pubsub.ValidationIgnore
+		}
 		if b == pubsub.ValidationReject {
 			fields := logrus.Fields{
 				"topic": topic,
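The added block demotes a REJECT to an IGNORE whenever the validation context has already expired, so peers are not penalized for rejections caused by our own pubsub processing timeouts. The shape of the check in isolation, with a toy result type standing in for pubsub's:

package main

import (
	"context"
	"fmt"
	"time"
)

type validationResult int

const (
	validationAccept validationResult = iota
	validationIgnore
	validationReject
)

// validate mimics the wrapped validator: a REJECT produced after the
// context deadline has passed is demoted to IGNORE, since the timeout
// is our node's problem rather than the peer's.
func validate(ctx context.Context) validationResult {
	b := validationReject // pretend the inner validator rejected

	if b == validationReject && ctx.Err() != nil {
		b = validationIgnore
	}
	return b
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	defer cancel()
	time.Sleep(time.Millisecond)                   // let the deadline expire
	fmt.Println(validate(ctx) == validationIgnore) // true
}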
@@ -687,7 +692,7 @@ func (s *Service) retrievePersistentSubs(currSlot primitives.Slot) []uint64 {
 	return slice.SetUint64(append(persistentSubs, wantedSubs...))
 }

-func (_ *Service) retrieveActiveSyncSubnets(currEpoch primitives.Epoch) []uint64 {
+func (*Service) retrieveActiveSyncSubnets(currEpoch primitives.Epoch) []uint64 {
 	subs := cache.SyncSubnetIDs.GetAllSubnets(currEpoch)
 	return slice.SetUint64(subs)
 }

@@ -742,17 +747,17 @@ func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID {
 }

 // Add fork digest to topic.
-func (_ *Service) addDigestToTopic(topic string, digest [4]byte) string {
+func (*Service) addDigestToTopic(topic string, digest [4]byte) string {
 	if !strings.Contains(topic, "%x") {
-		log.Fatal("Topic does not have appropriate formatter for digest")
+		log.Error("Topic does not have appropriate formatter for digest")
 	}
 	return fmt.Sprintf(topic, digest)
 }

 // Add the digest and index to subnet topic.
-func (_ *Service) addDigestAndIndexToTopic(topic string, digest [4]byte, idx uint64) string {
+func (*Service) addDigestAndIndexToTopic(topic string, digest [4]byte, idx uint64) string {
 	if !strings.Contains(topic, "%x") {
-		log.Fatal("Topic does not have appropriate formatter for digest")
+		log.Error("Topic does not have appropriate formatter for digest")
 	}
 	return fmt.Sprintf(topic, digest, idx)
 }
@@ -45,6 +45,7 @@ go_test(
         "blob_test.go",
         "cache_test.go",
         "initializer_test.go",
+        "result_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
@@ -29,9 +29,7 @@ const (
 	RequireSidecarProposerExpected
 )

-// GossipSidecarRequirements defines the set of requirements that BlobSidecars received on gossip
-// must satisfy in order to upgrade an ROBlob to a VerifiedROBlob.
-var GossipSidecarRequirements = []Requirement{
+var allSidecarRequirements = []Requirement{
 	RequireBlobIndexInBounds,
 	RequireNotFromFutureSlot,
 	RequireSlotAboveFinalized,

@@ -45,26 +43,32 @@ var GossipSidecarRequirements = []Requirement{
 	RequireSidecarProposerExpected,
 }

+// GossipSidecarRequirements defines the set of requirements that BlobSidecars received on gossip
+// must satisfy in order to upgrade an ROBlob to a VerifiedROBlob.
+var GossipSidecarRequirements = requirementList(allSidecarRequirements).excluding()
+
+// SpectestSidecarRequirements is used by the forkchoice spectests when verifying blobs used in the on_block tests.
+// The only requirements we exclude for these tests are the parent validity and seen tests, as these are specific to
+// gossip processing and require the bad block cache that we only use there.
+var SpectestSidecarRequirements = requirementList(GossipSidecarRequirements).excluding(
+	RequireSidecarParentSeen, RequireSidecarParentValid)
+
 // InitsyncSidecarRequirements is the list of verification requirements to be used by the init-sync service
 // for batch-mode syncing. Because we only perform batch verification as part of the IsDataAvailable method
 // for blobs after the block has been verified, and the blobs to be verified are keyed in the cache by the
-// block root, it is safe to skip the following verifications.
-// RequireSidecarProposerExpected
-// RequireNotFromFutureSlot,
-// RequireSlotAboveFinalized,
-// RequireSidecarParentSeen,
-// RequireSidecarParentValid,
-// RequireSidecarParentSlotLower,
-// RequireSidecarDescendsFromFinalized,
-var InitsyncSidecarRequirements = []Requirement{
-	RequireValidProposerSignature,
-	RequireSidecarKzgProofVerified,
-	RequireBlobIndexInBounds,
-	RequireSidecarInclusionProven,
-}
+// block root, the list of required verifications is much shorter than gossip.
+var InitsyncSidecarRequirements = requirementList(GossipSidecarRequirements).excluding(
+	RequireNotFromFutureSlot,
+	RequireSlotAboveFinalized,
+	RequireSidecarParentSeen,
+	RequireSidecarParentValid,
+	RequireSidecarParentSlotLower,
+	RequireSidecarDescendsFromFinalized,
+	RequireSidecarProposerExpected,
+)

-// BackfillSidecarRequirements is the same as InitsyncSidecarRequirements
-var BackfillSidecarRequirements = InitsyncSidecarRequirements
+// BackfillSidecarRequirements is the same as InitsyncSidecarRequirements.
+var BackfillSidecarRequirements = requirementList(InitsyncSidecarRequirements).excluding()

 var (
 	ErrBlobInvalid = errors.New("blob failed verification")
@@ -61,13 +61,17 @@ func (d SignatureData) logFields() log.Fields {
 	}
 }

-func newSigCache(vr []byte, size int) *sigCache {
-	return &sigCache{Cache: lruwrpr.New(size), valRoot: vr}
+func newSigCache(vr []byte, size int, gf forkLookup) *sigCache {
+	if gf == nil {
+		gf = forks.Fork
+	}
+	return &sigCache{Cache: lruwrpr.New(size), valRoot: vr, getFork: gf}
 }

 type sigCache struct {
 	*lru.Cache
 	valRoot []byte
+	getFork forkLookup
 }

 // VerifySignature verifies the given signature data against the key obtained via ValidatorAtIndexer.

@@ -81,7 +85,7 @@ func (c *sigCache) VerifySignature(sig SignatureData, v ValidatorAtIndexer) (err
 		}
 	}()
 	e := slots.ToEpoch(sig.Slot)
-	fork, err := forks.Fork(e)
+	fork, err := c.getFork(e)
 	if err != nil {
 		return err
 	}

@@ -27,7 +27,7 @@ func TestVerifySignature(t *testing.T) {
 	_, blobs, _, pk := testSignedBlockBlobKeys(t, valRoot[:], 0, 1)
 	b := blobs[0]

-	sc := newSigCache(valRoot[:], 1)
+	sc := newSigCache(valRoot[:], 1, nil)
 	cb := func(idx primitives.ValidatorIndex) (*eth.Validator, error) {
 		return &eth.Validator{PublicKey: pk.Marshal()}, nil
 	}

@@ -42,7 +42,7 @@ func TestSignatureCacheMissThenHit(t *testing.T) {
 	_, blobs, _, pk := testSignedBlockBlobKeys(t, valRoot[:], 0, 1)
 	b := blobs[0]

-	sc := newSigCache(valRoot[:], 1)
+	sc := newSigCache(valRoot[:], 1, nil)
 	cb := func(idx primitives.ValidatorIndex) (*eth.Validator, error) {
 		return &eth.Validator{PublicKey: pk.Marshal()}, nil
 	}
@@ -13,11 +13,17 @@ type VerificationMultiError struct {

 // Unwrap is used by errors.Is to unwrap errors.
 func (ve VerificationMultiError) Unwrap() error {
+	if ve.err == nil {
+		return nil
+	}
 	return ve.err
 }

 // Error satisfies the standard error interface.
 func (ve VerificationMultiError) Error() string {
+	if ve.err == nil {
+		return ""
+	}
 	return ve.err.Error()
 }
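The added nil guards make the zero value of VerificationMultiError safe to call (Error would otherwise dereference a nil error), while Unwrap keeps the type compatible with errors.Is matching. A sketch of the wrapper pattern; the type and field names here are illustrative, not the actual Prysm struct:

package main

import (
	"errors"
	"fmt"
)

var errBlobInvalid = errors.New("blob failed verification")

// multiError wraps an underlying error; both methods guard against
// the zero value so an empty multiError is safe to use.
type multiError struct{ err error }

func (m multiError) Unwrap() error {
	if m.err == nil {
		return nil
	}
	return m.err
}

func (m multiError) Error() string {
	if m.err == nil {
		return ""
	}
	return m.err.Error()
}

func main() {
	wrapped := multiError{err: fmt.Errorf("index 2: %w", errBlobInvalid)}
	fmt.Println(errors.Is(wrapped, errBlobInvalid)) // true, via Unwrap
	fmt.Println(multiError{}.Error() == "")         // true: zero value is safe
}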
@@ -10,6 +10,8 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/network/forks"
+	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 )

 // Forkchoicer represents the forkchoice methods that the verifiers need.

@@ -59,13 +61,25 @@ func (ini *Initializer) NewBlobVerifier(b blocks.ROBlob, reqs []Requirement) *RO
 // via the WaitForInitializer method.
 type InitializerWaiter struct {
 	sync.RWMutex
-	ready bool
-	cw    startup.ClockWaiter
-	ini   *Initializer
+	ready   bool
+	cw      startup.ClockWaiter
+	ini     *Initializer
+	getFork forkLookup
 }

+type forkLookup func(targetEpoch primitives.Epoch) (*ethpb.Fork, error)
+
+type InitializerOption func(waiter *InitializerWaiter)
+
+// WithForkLookup allows tests to modify how Fork consensus type lookup works. Needed for spectests with weird Forks.
+func WithForkLookup(fl forkLookup) InitializerOption {
+	return func(iw *InitializerWaiter) {
+		iw.getFork = fl
+	}
+}
+
 // NewInitializerWaiter creates an InitializerWaiter which can be used to obtain an Initializer once async dependencies are ready.
-func NewInitializerWaiter(cw startup.ClockWaiter, fc Forkchoicer, sr StateByRooter) *InitializerWaiter {
+func NewInitializerWaiter(cw startup.ClockWaiter, fc Forkchoicer, sr StateByRooter, opts ...InitializerOption) *InitializerWaiter {
 	pc := newPropCache()
 	// signature cache is initialized in WaitForInitializer, since we need the genesis validators root, which can be obtained from startup.Clock.
 	shared := &sharedResources{

@@ -73,7 +87,14 @@ func NewInitializerWaiter(cw startup.ClockWaiter, fc Forkchoicer, sr StateByRoot
 		pc: pc,
 		sr: sr,
 	}
-	return &InitializerWaiter{cw: cw, ini: &Initializer{shared: shared}}
+	iw := &InitializerWaiter{cw: cw, ini: &Initializer{shared: shared}}
+	for _, o := range opts {
+		o(iw)
+	}
+	if iw.getFork == nil {
+		iw.getFork = forks.Fork
+	}
+	return iw
 }

 // WaitForInitializer ensures that asynchronous initialization of the shared resources the initializer

@@ -84,7 +105,7 @@ func (w *InitializerWaiter) WaitForInitializer(ctx context.Context) (*Initialize
 	}
 	// We wait until this point to initialize the signature cache because here we have access to the genesis validator root.
 	vr := w.ini.shared.clock.GenesisValidatorsRoot()
-	sc := newSigCache(vr[:], DefaultSignatureCacheSize)
+	sc := newSigCache(vr[:], DefaultSignatureCacheSize, w.getFork)
 	w.ini.shared.sc = sc
 	return w.ini, nil
 }
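WithForkLookup follows the functional-options pattern: production callers construct the waiter with no options and fall back to forks.Fork, while spectests inject a custom lookup for their non-standard forks. A distilled, self-contained sketch of the pattern:

package main

import "fmt"

type fork struct{ name string }

type lookup func(epoch uint64) (*fork, error)

func defaultLookup(epoch uint64) (*fork, error) { return &fork{"mainnet"}, nil }

type waiter struct{ getFork lookup }

type option func(*waiter)

// withForkLookup lets tests swap in a custom lookup.
func withForkLookup(fl lookup) option {
	return func(w *waiter) { w.getFork = fl }
}

func newWaiter(opts ...option) *waiter {
	w := &waiter{}
	for _, o := range opts {
		o(w)
	}
	if w.getFork == nil {
		w.getFork = defaultLookup // production default
	}
	return w
}

func main() {
	prod := newWaiter()
	test := newWaiter(withForkLookup(func(uint64) (*fork, error) {
		return &fork{"spectest"}, nil
	}))
	f1, _ := prod.getFork(0)
	f2, _ := test.getFork(0)
	fmt.Println(f1.name, f2.name) // mainnet spectest
}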
@@ -3,6 +3,54 @@ package verification
|
||||
// Requirement represents a validation check that needs to pass in order for a Verified form a consensus type to be issued.
|
||||
type Requirement int

var unknownRequirementName = "unknown"

func (r Requirement) String() string {
	switch r {
	case RequireBlobIndexInBounds:
		return "RequireBlobIndexInBounds"
	case RequireNotFromFutureSlot:
		return "RequireNotFromFutureSlot"
	case RequireSlotAboveFinalized:
		return "RequireSlotAboveFinalized"
	case RequireValidProposerSignature:
		return "RequireValidProposerSignature"
	case RequireSidecarParentSeen:
		return "RequireSidecarParentSeen"
	case RequireSidecarParentValid:
		return "RequireSidecarParentValid"
	case RequireSidecarParentSlotLower:
		return "RequireSidecarParentSlotLower"
	case RequireSidecarDescendsFromFinalized:
		return "RequireSidecarDescendsFromFinalized"
	case RequireSidecarInclusionProven:
		return "RequireSidecarInclusionProven"
	case RequireSidecarKzgProofVerified:
		return "RequireSidecarKzgProofVerified"
	case RequireSidecarProposerExpected:
		return "RequireSidecarProposerExpected"
	default:
		return unknownRequirementName
	}
}

type requirementList []Requirement

func (rl requirementList) excluding(minus ...Requirement) []Requirement {
	rm := make(map[Requirement]struct{})
	nl := make([]Requirement, 0, len(rl)-len(minus))
	for i := range minus {
		rm[minus[i]] = struct{}{}
	}
	for i := range rl {
		if _, excluded := rm[rl[i]]; excluded {
			continue
		}
		nl = append(nl, rl[i])
	}
	return nl
}

// results collects positive verification results.
// This bitmap can be used to test which verifications have been successfully completed in order to
// decide whether it is safe to issue a "Verified" type variant.
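
The hunk is cut off before the `results` type itself, but the comment describes a per-Requirement bitmap. A hedged sketch of the idea (the name and layout below are assumptions, not the actual Prysm type):

	// resultSketch is illustrative only: one bit per Requirement, set on success.
	type resultSketch struct {
		done uint64
	}

	func (r *resultSketch) record(req Requirement)      { r.done |= 1 << uint(req) }
	func (r *resultSketch) passed(req Requirement) bool { return r.done&(1<<uint(req)) != 0 }

With such a bitmap, issuing a "Verified" variant reduces to checking that every bit named in the relevant requirement list is set.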

63
beacon-chain/verification/result_test.go
Normal file
@@ -0,0 +1,63 @@
package verification

import (
	"math"
	"testing"

	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestResultList(t *testing.T) {
	const (
		a Requirement = iota
		b
		c
		d
		e
		f
		g
		h
	)
	// leave out h to test excluding non-existent item
	all := []Requirement{a, b, c, d, e, f, g}
	alsoAll := requirementList(all).excluding()
	require.DeepEqual(t, all, alsoAll)
	missingFirst := requirementList(all).excluding(a)
	require.Equal(t, len(all)-1, len(missingFirst))
	require.DeepEqual(t, all[1:], missingFirst)
	missingLast := requirementList(all).excluding(g)
	require.Equal(t, len(all)-1, len(missingLast))
	require.DeepEqual(t, all[0:len(all)-1], missingLast)
	missingEnds := requirementList(missingLast).excluding(a)
	require.Equal(t, len(missingLast)-1, len(missingEnds))
	require.DeepEqual(t, all[1:len(all)-1], missingEnds)
	excludeNonexist := requirementList(missingEnds).excluding(h)
	require.Equal(t, len(missingEnds), len(excludeNonexist))
	require.DeepEqual(t, missingEnds, excludeNonexist)
}

func TestExportedBlobSanityCheck(t *testing.T) {
	// make sure all requirement lists contain the bare minimum checks
	sanity := []Requirement{RequireValidProposerSignature, RequireSidecarKzgProofVerified, RequireBlobIndexInBounds, RequireSidecarInclusionProven}
	reqs := [][]Requirement{GossipSidecarRequirements, SpectestSidecarRequirements, InitsyncSidecarRequirements, BackfillSidecarRequirements}
	for i := range reqs {
		r := reqs[i]
		reqMap := make(map[Requirement]struct{})
		for ii := range r {
			reqMap[r[ii]] = struct{}{}
		}
		for ii := range sanity {
			_, ok := reqMap[sanity[ii]]
			require.Equal(t, true, ok)
		}
	}
	require.DeepEqual(t, allSidecarRequirements, GossipSidecarRequirements)
}

func TestAllBlobRequirementsHaveStrings(t *testing.T) {
	var derp Requirement = math.MaxInt
	require.Equal(t, unknownRequirementName, derp.String())
	for i := range allSidecarRequirements {
		require.NotEqual(t, unknownRequirementName, allSidecarRequirements[i].String())
	}
}

@@ -2,7 +2,10 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["options.go"],
    srcs = [
        "log.go",
        "options.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/execution",
    visibility = [
        "//beacon-chain:__subpackages__",

5
cmd/beacon-chain/execution/log.go
Normal file
@@ -0,0 +1,5 @@
package execution

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "execution")

@@ -9,7 +9,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v5/io/file"
	log "github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
)

@@ -53,7 +52,7 @@ func parseJWTSecretFromFile(c *cli.Context) ([]byte, error) {
		return nil, err
	}
	strData := strings.TrimSpace(string(enc))
	if len(strData) == 0 {
	if strData == "" {
		return nil, fmt.Errorf("provided JWT secret in file %s cannot be empty", jwtSecretFile)
	}
	secret, err := hex.DecodeString(strings.TrimPrefix(strData, "0x"))

@@ -40,7 +40,7 @@ var (
	}
	writeSSZStateTransitionsFlag = &cli.BoolFlag{
		Name:  "interop-write-ssz-state-transitions",
		Usage: "Writes SSZ states to disk after attempted state transitio.",
		Usage: "Writes failed SSZ block to disk after attempted importing block and state transition.",
	}
	disableGRPCConnectionLogging = &cli.BoolFlag{
		Name: "disable-grpc-connection-logging",
@@ -97,7 +97,7 @@ var (
	}
	EnableMinimalSlashingProtection = &cli.BoolFlag{
		Name:  "enable-minimal-slashing-protection",
		Usage: "Enables the minimal slashing protection. See EIP-3076 for more details.",
		Usage: "(Experimental): Enables the minimal slashing protection. See EIP-3076 for more details.",
	}
	enableDoppelGangerProtection = &cli.BoolFlag{
		Name: "enable-doppelganger",

@@ -120,7 +120,6 @@ type BeaconChainConfig struct {
	DomainApplicationMask      [4]byte `yaml:"DOMAIN_APPLICATION_MASK" spec:"true"`        // DomainApplicationMask defines the BLS signature domain for application mask.
	DomainApplicationBuilder   [4]byte `yaml:"DOMAIN_APPLICATION_BUILDER" spec:"true"`     // DomainApplicationBuilder defines the BLS signature domain for application builder.
	DomainBLSToExecutionChange [4]byte `yaml:"DOMAIN_BLS_TO_EXECUTION_CHANGE" spec:"true"` // DomainBLSToExecutionChange defines the BLS signature domain to change withdrawal addresses to ETH1 prefix
	DomainBlobSidecar          [4]byte `yaml:"DOMAIN_BLOB_SIDECAR" spec:"true"`            // DomainBlobSidecar defines the BLS signature domain for blob sidecar.

	// Prysm constants.
	GweiPerEth uint64 // GweiPerEth is the amount of gwei corresponding to 1 eth.

@@ -166,7 +166,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
	DomainApplicationMask:      bytesutil.Uint32ToBytes4(0x00000001),
	DomainApplicationBuilder:   bytesutil.Uint32ToBytes4(0x00000001),
	DomainBLSToExecutionChange: bytesutil.Uint32ToBytes4(0x0A000000),
	DomainBlobSidecar:          bytesutil.Uint32ToBytes4(0x0B000000),

	// Prysm constants.
	GweiPerEth: 1000000000,

1742
proto/prysm/v1alpha1/beacon_block.pb.go
generated
File diff suppressed because it is too large

1249
proto/prysm/v1alpha1/validator.pb.go
generated
File diff suppressed because it is too large
@@ -537,6 +537,9 @@ message DutiesResponse {

    // Whether the validator belongs in the sync committee and has to perform sync committee duty.
    bool is_sync_committee = 8;

    // The number of committees in the duty's slot.
    uint64 committees_at_slot = 9;
  }
}

@@ -8,6 +8,5 @@ import (
)

func TestMainnet_Deneb_Forkchoice(t *testing.T) {
	t.Skip("This will fail until we re-integrate proof verification")
	forkchoice.Run(t, "mainnet", version.Deneb)
}

@@ -8,6 +8,5 @@ import (
)

func TestMinimal_Deneb_Forkchoice(t *testing.T) {
	t.Skip("blocked by go-kzg-4844 minimal trusted setup")
	forkchoice.Run(t, "minimal", version.Deneb)
}

@@ -35,6 +35,11 @@ func processJustificationAndFinalizationPrecomputeWrapper(t *testing.T, st state
	require.NoError(t, err)
	_, bp, err = altair.ProcessEpochParticipation(ctx, st, bp, vp)
	require.NoError(t, err)
	activeBal, targetPrevious, targetCurrent, err := st.UnrealizedCheckpointBalances()
	require.NoError(t, err)
	require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
	require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
	require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)

	st, err = precompute.ProcessJustificationAndFinalizationPreCompute(st, bp)
	require.NoError(t, err, "Could not process justification")

@@ -76,6 +76,13 @@ func runPrecomputeRewardsAndPenaltiesTest(t *testing.T, testFolderPath string) {
	require.NoError(t, err)
	vp, bp, err = altair.ProcessEpochParticipation(ctx, preBeaconState, bp, vp)
	require.NoError(t, err)

	activeBal, targetPrevious, targetCurrent, err := preBeaconState.UnrealizedCheckpointBalances()
	require.NoError(t, err)
	require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
	require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
	require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)

	deltas, err := altair.AttestationsDelta(preBeaconState, bp, vp)
	require.NoError(t, err)

@@ -35,6 +35,11 @@ func processJustificationAndFinalizationPrecomputeWrapper(t *testing.T, st state
	require.NoError(t, err)
	_, bp, err = altair.ProcessEpochParticipation(ctx, st, bp, vp)
	require.NoError(t, err)
	activeBal, targetPrevious, targetCurrent, err := st.UnrealizedCheckpointBalances()
	require.NoError(t, err)
	require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
	require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
	require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)

	st, err = precompute.ProcessJustificationAndFinalizationPreCompute(st, bp)
	require.NoError(t, err, "Could not process justification")

@@ -80,6 +80,13 @@ func runPrecomputeRewardsAndPenaltiesTest(t *testing.T, testFolderPath string) {
	require.NoError(t, err)
	vp, bp, err = altair.ProcessEpochParticipation(ctx, preBeaconState, bp, vp)
	require.NoError(t, err)

	activeBal, targetPrevious, targetCurrent, err := preBeaconState.UnrealizedCheckpointBalances()
	require.NoError(t, err)
	require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
	require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
	require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)

	deltas, err := altair.AttestationsDelta(preBeaconState, bp, vp)
	require.NoError(t, err)

@@ -35,6 +35,11 @@ func processJustificationAndFinalizationPrecomputeWrapper(t *testing.T, st state
	require.NoError(t, err)
	_, bp, err = altair.ProcessEpochParticipation(ctx, st, bp, vp)
	require.NoError(t, err)
	activeBal, targetPrevious, targetCurrent, err := st.UnrealizedCheckpointBalances()
	require.NoError(t, err)
	require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
	require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
	require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)

	st, err = precompute.ProcessJustificationAndFinalizationPreCompute(st, bp)
	require.NoError(t, err, "Could not process justification")

@@ -80,6 +80,13 @@ func runPrecomputeRewardsAndPenaltiesTest(t *testing.T, testFolderPath string) {
	require.NoError(t, err)
	vp, bp, err = altair.ProcessEpochParticipation(ctx, preBeaconState, bp, vp)
	require.NoError(t, err)

	activeBal, targetPrevious, targetCurrent, err := preBeaconState.UnrealizedCheckpointBalances()
	require.NoError(t, err)
	require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
	require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
	require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)

	deltas, err := altair.AttestationsDelta(preBeaconState, bp, vp)
	require.NoError(t, err)

@@ -22,6 +22,7 @@ go_library(
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/execution:go_default_library",
        "//beacon-chain/forkchoice:go_default_library",
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/startup:go_default_library",

@@ -10,7 +10,9 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -23,16 +25,27 @@ type Builder struct {
	service  *blockchain.Service
	lastTick int64
	execMock *engineMock
	vwait    *verification.InitializerWaiter
}

func NewBuilder(t testing.TB, initialState state.BeaconState, initialBlock interfaces.ReadOnlySignedBeaconBlock) *Builder {
	execMock := &engineMock{
		powBlocks: make(map[[32]byte]*ethpb.PowBlock),
	}
	service := startChainService(t, initialState, initialBlock, execMock)
	cw := startup.NewClockSynchronizer()
	service, sg, fc := startChainService(t, initialState, initialBlock, execMock, cw)
	// blob spectests use a weird Fork in the genesis beacon state that has different previous and current versions.
	// This trips up the lite fork lookup code in the blob verifier that figures out the fork
	// based on the slot of the block. So just for spectests we override that behavior and get the fork from the state
	// which matches the behavior of block verification.
	getFork := func(targetEpoch primitives.Epoch) (*ethpb.Fork, error) {
		return initialState.Fork(), nil
	}
	bvw := verification.NewInitializerWaiter(cw, fc, sg, verification.WithForkLookup(getFork))
	return &Builder{
		service:  service,
		execMock: execMock,
		vwait:    bvw,
	}
}

@@ -88,7 +101,7 @@ func (bb *Builder) block(t testing.TB, b interfaces.ReadOnlySignedBeaconBlock) [
// InvalidBlock receives the invalid block and notifies forkchoice.
func (bb *Builder) InvalidBlock(t testing.TB, b interfaces.ReadOnlySignedBeaconBlock) {
	r := bb.block(t, b)
	ctx, cancel := context.WithTimeout(context.TODO(), time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)
	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
	defer cancel()
	require.Equal(t, true, bb.service.ReceiveBlock(ctx, b, r, nil) != nil)
}
@@ -96,7 +109,7 @@ func (bb *Builder) InvalidBlock(t testing.TB, b interfaces.ReadOnlySignedBeaconB
// ValidBlock receives the valid block and notifies forkchoice.
func (bb *Builder) ValidBlock(t testing.TB, b interfaces.ReadOnlySignedBeaconBlock) {
	r := bb.block(t, b)
	ctx, cancel := context.WithTimeout(context.TODO(), time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)
	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
	defer cancel()
	require.NoError(t, bb.service.ReceiveBlock(ctx, b, r, nil))
}

@@ -5,6 +5,7 @@ import (
	"fmt"
	"os"
	"path"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/common/hexutil"
@@ -115,7 +116,7 @@ func runTest(t *testing.T, config string, fork int, basePath string) {
			t.Fatalf("unknown fork version: %v", fork)
		}
	}
	runBlobStep(t, step.Blobs, beaconBlock, fork, folder, testsFolderPath, step.Proofs, builder)
	runBlobStep(t, step, beaconBlock, fork, folder, testsFolderPath, builder)
	if beaconBlock != nil {
		if step.Valid != nil && !*step.Valid {
			builder.InvalidBlock(t, beaconBlock)
@@ -281,14 +282,15 @@ func unmarshalSignedDenebBlock(t *testing.T, raw []byte) interfaces.SignedBeacon
}

func runBlobStep(t *testing.T,
	blobs *string,
	step Step,
	beaconBlock interfaces.ReadOnlySignedBeaconBlock,
	fork int,
	folder os.DirEntry,
	testsFolderPath string,
	proofs []*string,
	builder *Builder,
) {
	blobs := step.Blobs
	proofs := step.Proofs
	if blobs != nil && *blobs != "null" {
		require.NotNil(t, beaconBlock)
		require.Equal(t, true, fork >= version.Deneb)
@@ -305,44 +307,94 @@ func runBlobStep(t *testing.T,
		require.NoError(t, err)
		sh, err := beaconBlock.Header()
		require.NoError(t, err)
		for index := uint64(0); index*fieldparams.BlobLength < uint64(len(blobsSSZ)); index++ {
		requireVerifyExpected := errAssertionForStep(step, verification.ErrBlobInvalid)
		for index := 0; index*fieldparams.BlobLength < len(blobsSSZ); index++ {
			var proof []byte
			if index < uint64(len(proofs)) {
			if index < len(proofs) {
				proofPTR := proofs[index]
				require.NotNil(t, proofPTR)
				proof, err = hexutil.Decode(*proofPTR)
				require.NoError(t, err)
			}

			var kzg []byte
			if uint64(len(kzgs)) < index {
				kzg = kzgs[index]
			}
			if len(kzg) == 0 {
				kzg = make([]byte, 48)
			}
			blob := [fieldparams.BlobLength]byte{}
			copy(blob[:], blobsSSZ[index*fieldparams.BlobLength:])
			fakeProof := make([][]byte, fieldparams.KzgCommitmentInclusionProofDepth)
			for i := range fakeProof {
				fakeProof[i] = make([]byte, fieldparams.RootLength)
			}
			if len(proof) == 0 {
				proof = make([]byte, 48)
			}

			inclusionProof, err := blocks.MerkleProofKZGCommitment(block.Body(), index)
			require.NoError(t, err)
			pb := &ethpb.BlobSidecar{
				Index: index,
				Index: uint64(index),
				Blob: blob[:],
				KzgCommitment: kzg,
				KzgCommitment: kzgs[index],
				KzgProof: proof,
				SignedBlockHeader: sh,
				CommitmentInclusionProof: fakeProof,
				CommitmentInclusionProof: inclusionProof,
			}
			ro, err := blocks.NewROBlobWithRoot(pb, root)
			require.NoError(t, err)
			vsc, err := verification.BlobSidecarNoop(ro)
			ini, err := builder.vwait.WaitForInitializer(context.Background())
			require.NoError(t, err)
			require.NoError(t, builder.service.ReceiveBlob(context.Background(), vsc))
			bv := ini.NewBlobVerifier(ro, verification.SpectestSidecarRequirements)
			ctx := context.Background()
			if err := bv.BlobIndexInBounds(); err != nil {
				t.Logf("BlobIndexInBounds error: %s", err.Error())
			}
			if err := bv.NotFromFutureSlot(); err != nil {
				t.Logf("NotFromFutureSlot error: %s", err.Error())
			}
			if err := bv.SlotAboveFinalized(); err != nil {
				t.Logf("SlotAboveFinalized error: %s", err.Error())
			}
			if err := bv.SidecarInclusionProven(); err != nil {
				t.Logf("SidecarInclusionProven error: %s", err.Error())
			}
			if err := bv.SidecarKzgProofVerified(); err != nil {
				t.Logf("SidecarKzgProofVerified error: %s", err.Error())
			}
			if err := bv.ValidProposerSignature(ctx); err != nil {
				t.Logf("ValidProposerSignature error: %s", err.Error())
			}
			if err := bv.SidecarParentSlotLower(); err != nil {
				t.Logf("SidecarParentSlotLower error: %s", err.Error())
			}
			if err := bv.SidecarDescendsFromFinalized(); err != nil {
				t.Logf("SidecarDescendsFromFinalized error: %s", err.Error())
			}
			if err := bv.SidecarProposerExpected(ctx); err != nil {
				t.Logf("SidecarProposerExpected error: %s", err.Error())
			}

			vsc, err := bv.VerifiedROBlob()
			requireVerifyExpected(t, err)

			if err == nil {
				require.NoError(t, builder.service.ReceiveBlob(context.Background(), vsc))
			}
		}
	}
}

func errAssertionForStep(step Step, expect error) func(t *testing.T, err error) {
	if !*step.Valid {
		return func(t *testing.T, err error) {
			require.ErrorIs(t, err, expect)
		}
	}
	return func(t *testing.T, err error) {
		if err != nil {
			require.ErrorIs(t, err, verification.ErrBlobInvalid)
			me, ok := err.(verification.VerificationMultiError)
			require.Equal(t, true, ok)
			fails := me.Failures()
			// we haven't performed any verification, so all the results should be this type
			fmsg := make([]string, 0, len(fails))
			for k, v := range fails {
				fmsg = append(fmsg, fmt.Sprintf("%s - %s", v.Error(), k.String()))
			}
			t.Fatal(strings.Join(fmsg, ";"))
		}
	}
}

@@ -16,6 +16,7 @@ import (
	coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	testDB "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
	doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
@@ -34,7 +35,8 @@ func startChainService(t testing.TB,
	st state.BeaconState,
	block interfaces.ReadOnlySignedBeaconBlock,
	engineMock *engineMock,
) *blockchain.Service {
	clockSync *startup.ClockSynchronizer,
) (*blockchain.Service, *stategen.State, forkchoice.ForkChoicer) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	require.NoError(t, db.SaveBlock(ctx, block))
@@ -58,28 +60,30 @@ func startChainService(t testing.TB,
	require.NoError(t, err)

	fc := doublylinkedtree.New()
	sg := stategen.New(db, fc)
	opts := append([]blockchain.Option{},
		blockchain.WithExecutionEngineCaller(engineMock),
		blockchain.WithFinalizedStateAtStartUp(st),
		blockchain.WithDatabase(db),
		blockchain.WithAttestationService(attPool),
		blockchain.WithForkChoiceStore(fc),
		blockchain.WithStateGen(stategen.New(db, fc)),
		blockchain.WithStateGen(sg),
		blockchain.WithStateNotifier(&mock.MockStateNotifier{}),
		blockchain.WithAttestationPool(attestations.NewPool()),
		blockchain.WithDepositCache(depositCache),
		blockchain.WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
		blockchain.WithPayloadIDCache(cache.NewPayloadIDCache()),
		blockchain.WithClockSynchronizer(startup.NewClockSynchronizer()),
		blockchain.WithClockSynchronizer(clockSync),
		blockchain.WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
		blockchain.WithSyncChecker(mock.MockChecker{}),
		blockchain.WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
	)
	service, err := blockchain.NewService(context.Background(), opts...)
	require.NoError(t, err)
	// force start kzg context here until Deneb fork epoch is decided
	require.NoError(t, kzg.Start())
	require.NoError(t, service.StartFromSavedState(st))
	return service
	return service, sg, fc
}

type engineMock struct {

@@ -26,7 +26,6 @@ type SingleMerkleProof struct {
}

func RunMerkleProofTests(t *testing.T, config, forkOrPhase string, unmarshaller ssz_static.Unmarshaller) {
	t.Skip("testvectors are not available yet")
	runSingleMerkleProofTests(t, config, forkOrPhase, unmarshaller)
}

@@ -32,7 +32,11 @@ func processJustificationAndFinalizationPrecomputeWrapper(t *testing.T, st state
	require.NoError(t, err)
	_, bp, err = altair.ProcessEpochParticipation(ctx, st, bp, vp)
	require.NoError(t, err)

	activeBal, targetPrevious, targetCurrent, err := st.UnrealizedCheckpointBalances()
	require.NoError(t, err)
	require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
	require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
	require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)
	st, err = precompute.ProcessJustificationAndFinalizationPreCompute(st, bp)
	require.NoError(t, err, "Could not process justification")

@@ -70,8 +70,16 @@ func runPrecomputeRewardsAndPenaltiesTest(t *testing.T, testFolderPath string) {

	vp, bp, err := altair.InitializePrecomputeValidators(ctx, preBeaconState)
	require.NoError(t, err)

	vp, bp, err = altair.ProcessEpochParticipation(ctx, preBeaconState, bp, vp)
	require.NoError(t, err)

	activeBal, targetPrevious, targetCurrent, err := preBeaconState.UnrealizedCheckpointBalances()
	require.NoError(t, err)
	require.Equal(t, bp.ActiveCurrentEpoch, activeBal)
	require.Equal(t, bp.CurrentEpochTargetAttested, targetCurrent)
	require.Equal(t, bp.PrevEpochTargetAttested, targetPrevious)

	deltas, err := altair.AttestationsDelta(preBeaconState, bp, vp)
	require.NoError(t, err)

@@ -13,7 +13,6 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v5/testing/validator-mock",
    visibility = ["//visibility:public"],
    deps = [
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/validator:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//validator/client/iface:go_default_library",

3
testing/validator-mock/validator_client_mock.go
generated
@@ -13,7 +13,6 @@ import (
	context "context"
	reflect "reflect"

	primitives "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	iface "github.com/prysmaticlabs/prysm/v5/validator/client/iface"
	gomock "go.uber.org/mock/gomock"
@@ -402,7 +401,7 @@ func (mr *MockValidatorClientMockRecorder) SubmitValidatorRegistrations(arg0, ar
}

// SubscribeCommitteeSubnets mocks base method.
func (m *MockValidatorClient) SubscribeCommitteeSubnets(arg0 context.Context, arg1 *eth.CommitteeSubnetsSubscribeRequest, arg2 []primitives.ValidatorIndex) (*emptypb.Empty, error) {
func (m *MockValidatorClient) SubscribeCommitteeSubnets(arg0 context.Context, arg1 *eth.CommitteeSubnetsSubscribeRequest, arg2 []*eth.DutiesResponse_Duty) (*emptypb.Empty, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SubscribeCommitteeSubnets", arg0, arg1, arg2)
	ret0, _ := ret[0].(*emptypb.Empty)

@@ -7,7 +7,6 @@ import (
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/golang/protobuf/ptypes/empty"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/validator/client/iface"
@@ -174,9 +173,9 @@ func (c *beaconApiValidatorClient) SubmitValidatorRegistrations(ctx context.Cont
	})
}

func (c *beaconApiValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, validatorIndices []primitives.ValidatorIndex) (*empty.Empty, error) {
func (c *beaconApiValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, duties []*ethpb.DutiesResponse_Duty) (*empty.Empty, error) {
	return wrapInMetrics[*empty.Empty]("SubscribeCommitteeSubnets", func() (*empty.Empty, error) {
		return new(empty.Empty), c.subscribeCommitteeSubnets(ctx, in, validatorIndices)
		return new(empty.Empty), c.subscribeCommitteeSubnets(ctx, in, duties)
	})
}

@@ -95,6 +95,14 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get committees for epoch `%d`", epoch)
	}

	slotCommittees := make(map[string]uint64)
	for _, c := range committees {
		n, ok := slotCommittees[c.Slot]
		if !ok {
			n = 0
		}
		slotCommittees[c.Slot] = n + 1
	}
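
The loop above tallies how many committees the beacon API reported per slot, which later populates CommitteesAtSlot on each duty. A small illustrative run (input values are made up):

	// Hypothetical input: two committees at slot "32", one at slot "33".
	committees := []struct{ Slot string }{{Slot: "32"}, {Slot: "32"}, {Slot: "33"}}
	slotCommittees := make(map[string]uint64)
	for _, c := range committees {
		slotCommittees[c.Slot]++ // equivalent tally; a missing key reads as 0
	}
	// slotCommittees now holds {"32": 2, "33": 1}

As a side note, the n/ok dance in the diff is redundant in Go: a map lookup on a missing key already yields the zero value, so the increment form behaves identically.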

	// Mapping from a validator index to its attesting committee's index and slot
	attesterDutiesMapping := make(map[primitives.ValidatorIndex]committeeIndexSlotPair)
@@ -195,14 +203,15 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
	}

	duties[index] = &ethpb.DutiesResponse_Duty{
		Committee:      committeeValidatorIndices,
		CommitteeIndex: committeeIndex,
		AttesterSlot:   attesterSlot,
		ProposerSlots:  proposerDutySlots[validatorIndex],
		PublicKey:      pubkey,
		Status:         validatorStatus.Status,
		ValidatorIndex: validatorIndex,
		IsSyncCommittee: syncDutiesMapping[validatorIndex],
		Committee:        committeeValidatorIndices,
		CommitteeIndex:   committeeIndex,
		AttesterSlot:     attesterSlot,
		ProposerSlots:    proposerDutySlots[validatorIndex],
		PublicKey:        pubkey,
		Status:           validatorStatus.Status,
		ValidatorIndex:   validatorIndex,
		IsSyncCommittee:  syncDutiesMapping[validatorIndex],
		CommitteesAtSlot: slotCommittees[strconv.FormatUint(uint64(attesterSlot), 10)],
	}
}

@@ -880,69 +880,75 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
				validatorIndices[0],
				validatorIndices[1],
			},
			CommitteeIndex: committeeIndices[0],
			AttesterSlot:   committeeSlots[0],
			PublicKey:      pubkeys[0],
			Status:         statuses[0],
			ValidatorIndex: validatorIndices[0],
			CommitteeIndex:   committeeIndices[0],
			AttesterSlot:     committeeSlots[0],
			PublicKey:        pubkeys[0],
			Status:           statuses[0],
			ValidatorIndex:   validatorIndices[0],
			CommitteesAtSlot: 1,
		},
		{
			Committee: []primitives.ValidatorIndex{
				validatorIndices[0],
				validatorIndices[1],
			},
			CommitteeIndex: committeeIndices[0],
			AttesterSlot:   committeeSlots[0],
			PublicKey:      pubkeys[1],
			Status:         statuses[1],
			ValidatorIndex: validatorIndices[1],
			CommitteeIndex:   committeeIndices[0],
			AttesterSlot:     committeeSlots[0],
			PublicKey:        pubkeys[1],
			Status:           statuses[1],
			ValidatorIndex:   validatorIndices[1],
			CommitteesAtSlot: 1,
		},
		{
			Committee: []primitives.ValidatorIndex{
				validatorIndices[2],
				validatorIndices[3],
			},
			CommitteeIndex: committeeIndices[1],
			AttesterSlot:   committeeSlots[1],
			PublicKey:      pubkeys[2],
			Status:         statuses[2],
			ValidatorIndex: validatorIndices[2],
			CommitteeIndex:   committeeIndices[1],
			AttesterSlot:     committeeSlots[1],
			PublicKey:        pubkeys[2],
			Status:           statuses[2],
			ValidatorIndex:   validatorIndices[2],
			CommitteesAtSlot: 1,
		},
		{
			Committee: []primitives.ValidatorIndex{
				validatorIndices[2],
				validatorIndices[3],
			},
			CommitteeIndex: committeeIndices[1],
			AttesterSlot:   committeeSlots[1],
			PublicKey:      pubkeys[3],
			Status:         statuses[3],
			ValidatorIndex: validatorIndices[3],
			CommitteeIndex:   committeeIndices[1],
			AttesterSlot:     committeeSlots[1],
			PublicKey:        pubkeys[3],
			Status:           statuses[3],
			ValidatorIndex:   validatorIndices[3],
			CommitteesAtSlot: 1,
		},
		{
			Committee: []primitives.ValidatorIndex{
				validatorIndices[4],
				validatorIndices[5],
			},
			CommitteeIndex: committeeIndices[2],
			AttesterSlot:   committeeSlots[2],
			PublicKey:      pubkeys[4],
			Status:         statuses[4],
			ValidatorIndex: validatorIndices[4],
			ProposerSlots:  expectedProposerSlots1,
			CommitteeIndex:   committeeIndices[2],
			AttesterSlot:     committeeSlots[2],
			PublicKey:        pubkeys[4],
			Status:           statuses[4],
			ValidatorIndex:   validatorIndices[4],
			ProposerSlots:    expectedProposerSlots1,
			CommitteesAtSlot: 1,
		},
		{
			Committee: []primitives.ValidatorIndex{
				validatorIndices[4],
				validatorIndices[5],
			},
			CommitteeIndex:  committeeIndices[2],
			AttesterSlot:    committeeSlots[2],
			PublicKey:       pubkeys[5],
			Status:          statuses[5],
			ValidatorIndex:  validatorIndices[5],
			ProposerSlots:   expectedProposerSlots2,
			IsSyncCommittee: testCase.fetchSyncDuties,
			CommitteeIndex:   committeeIndices[2],
			AttesterSlot:     committeeSlots[2],
			PublicKey:        pubkeys[5],
			Status:           statuses[5],
			ValidatorIndex:   validatorIndices[5],
			ProposerSlots:    expectedProposerSlots2,
			IsSyncCommittee:  testCase.fetchSyncDuties,
			CommitteesAtSlot: 1,
		},
		{
			PublicKey: pubkeys[6],

@@ -8,63 +8,26 @@ import (

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/api/server/structs"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

func (c beaconApiValidatorClient) subscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, validatorIndices []primitives.ValidatorIndex) error {
func (c beaconApiValidatorClient) subscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, duties []*ethpb.DutiesResponse_Duty) error {
	if in == nil {
		return errors.New("committee subnets subscribe request is nil")
	}

	if len(in.CommitteeIds) != len(in.Slots) || len(in.CommitteeIds) != len(in.IsAggregator) || len(in.CommitteeIds) != len(validatorIndices) {
		return errors.New("arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator` and `validatorIndices` don't have the same length")
	if len(in.CommitteeIds) != len(in.Slots) || len(in.CommitteeIds) != len(in.IsAggregator) || len(in.CommitteeIds) != len(duties) {
		return errors.New("arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator` and `duties` don't have the same length")
	}

	slotToCommitteesAtSlotMap := make(map[primitives.Slot]uint64)
	jsonCommitteeSubscriptions := make([]*structs.BeaconCommitteeSubscription, len(in.CommitteeIds))
	for index := range in.CommitteeIds {
		subscribeSlot := in.Slots[index]
		subscribeCommitteeId := in.CommitteeIds[index]
		subscribeIsAggregator := in.IsAggregator[index]
		subscribeValidatorIndex := validatorIndices[index]

		committeesAtSlot, foundSlot := slotToCommitteesAtSlotMap[subscribeSlot]
		if !foundSlot {
			// Lazily fetch the committeesAtSlot from the beacon node if they are not already in the map
			epoch := slots.ToEpoch(subscribeSlot)
			duties, err := c.dutiesProvider.GetAttesterDuties(ctx, epoch, validatorIndices)
			if err != nil {
				return errors.Wrapf(err, "failed to get duties for epoch `%d`", epoch)
			}

			for _, duty := range duties {
				dutySlot, err := strconv.ParseUint(duty.Slot, 10, 64)
				if err != nil {
					return errors.Wrapf(err, "failed to parse slot `%s`", duty.Slot)
				}

				committees, err := strconv.ParseUint(duty.CommitteesAtSlot, 10, 64)
				if err != nil {
					return errors.Wrapf(err, "failed to parse CommitteesAtSlot `%s`", duty.CommitteesAtSlot)
				}

				slotToCommitteesAtSlotMap[primitives.Slot(dutySlot)] = committees
			}

			// If the slot still isn't in the map, we either received bad data from the beacon node or the caller of this function gave us bad data
			if committeesAtSlot, foundSlot = slotToCommitteesAtSlotMap[subscribeSlot]; !foundSlot {
				return errors.Errorf("failed to get committees for slot `%d`", subscribeSlot)
			}
		}

		jsonCommitteeSubscriptions[index] = &structs.BeaconCommitteeSubscription{
			CommitteeIndex:   strconv.FormatUint(uint64(subscribeCommitteeId), 10),
			CommitteesAtSlot: strconv.FormatUint(committeesAtSlot, 10),
			Slot:             strconv.FormatUint(uint64(subscribeSlot), 10),
			IsAggregator:     subscribeIsAggregator,
			ValidatorIndex:   strconv.FormatUint(uint64(subscribeValidatorIndex), 10),
			CommitteeIndex:   strconv.FormatUint(uint64(in.CommitteeIds[index]), 10),
			CommitteesAtSlot: strconv.FormatUint(duties[index].CommitteesAtSlot, 10),
			Slot:             strconv.FormatUint(uint64(in.Slots[index]), 10),
			IsAggregator:     in.IsAggregator[index],
			ValidatorIndex:   strconv.FormatUint(uint64(duties[index].ValidatorIndex), 10),
		}
	}

@@ -13,7 +13,6 @@ import (
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	"github.com/prysmaticlabs/prysm/v5/validator/client/beacon-api/mock"
	"go.uber.org/mock/gomock"
)
@@ -67,43 +66,8 @@ func TestSubscribeCommitteeSubnets_Valid(t *testing.T) {
		}
	}

	// Even though we have 3 distinct slots, the first 2 ones are in the same epoch so we should only send 2 requests to the beacon node
	dutiesProvider := mock.NewMockdutiesProvider(ctrl)
	dutiesProvider.EXPECT().GetAttesterDuties(
		ctx,
		slots.ToEpoch(subscribeSlots[0]),
		validatorIndices,
	).Return(
		[]*structs.AttesterDuty{
			{
				CommitteesAtSlot: strconv.FormatUint(committeesAtSlot[0], 10),
				Slot:             strconv.FormatUint(uint64(subscribeSlots[0]), 10),
			},
			{
				CommitteesAtSlot: strconv.FormatUint(committeesAtSlot[1], 10),
				Slot:             strconv.FormatUint(uint64(subscribeSlots[1]), 10),
			},
		},
		nil,
	).Times(1)

	dutiesProvider.EXPECT().GetAttesterDuties(
		ctx,
		slots.ToEpoch(subscribeSlots[2]),
		validatorIndices,
	).Return(
		[]*structs.AttesterDuty{
			{
				CommitteesAtSlot: strconv.FormatUint(committeesAtSlot[2], 10),
				Slot:             strconv.FormatUint(uint64(subscribeSlots[2]), 10),
			},
		},
		nil,
	).Times(1)

	validatorClient := &beaconApiValidatorClient{
		jsonRestHandler: jsonRestHandler,
		dutiesProvider:  dutiesProvider,
	}
	err = validatorClient.subscribeCommitteeSubnets(
		ctx,
@@ -112,21 +76,31 @@ func TestSubscribeCommitteeSubnets_Valid(t *testing.T) {
			CommitteeIds: committeeIndices,
			IsAggregator: isAggregator,
		},
		validatorIndices,
		[]*ethpb.DutiesResponse_Duty{
			{
				ValidatorIndex:   validatorIndices[0],
				CommitteesAtSlot: committeesAtSlot[0],
			},
			{
				ValidatorIndex:   validatorIndices[1],
				CommitteesAtSlot: committeesAtSlot[1],
			},
			{
				ValidatorIndex:   validatorIndices[2],
				CommitteesAtSlot: committeesAtSlot[2],
			},
		},
	)
	require.NoError(t, err)
}

func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
	const arraySizeMismatchErrorMessage = "arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator` and `validatorIndices` don't have the same length"
	const arraySizeMismatchErrorMessage = "arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator` and `duties` don't have the same length"

	testCases := []struct {
		name                    string
		subscribeRequest        *ethpb.CommitteeSubnetsSubscribeRequest
		validatorIndices        []primitives.ValidatorIndex
		attesterDuty            *structs.AttesterDuty
		dutiesError             error
		expectGetDutiesQuery    bool
		duties                  []*ethpb.DutiesResponse_Duty
		expectSubscribeRestCall bool
		expectedErrorMessage    string
	}{
@@ -142,7 +116,16 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
				Slots:        []primitives.Slot{1, 2},
				IsAggregator: []bool{false, true},
			},
			validatorIndices: []primitives.ValidatorIndex{1, 2},
			duties: []*ethpb.DutiesResponse_Duty{
				{
					ValidatorIndex:   1,
					CommitteesAtSlot: 1,
				},
				{
					ValidatorIndex:   2,
					CommitteesAtSlot: 2,
				},
			},
			expectedErrorMessage: arraySizeMismatchErrorMessage,
		},
		{
@@ -152,7 +135,16 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
				Slots:        []primitives.Slot{1},
				IsAggregator: []bool{false, true},
			},
			validatorIndices: []primitives.ValidatorIndex{1, 2},
			duties: []*ethpb.DutiesResponse_Duty{
				{
					ValidatorIndex:   1,
					CommitteesAtSlot: 1,
				},
				{
					ValidatorIndex:   2,
					CommitteesAtSlot: 2,
				},
			},
			expectedErrorMessage: arraySizeMismatchErrorMessage,
		},
		{
@@ -162,76 +154,33 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
				Slots:        []primitives.Slot{1, 2},
				IsAggregator: []bool{false},
			},
			validatorIndices: []primitives.ValidatorIndex{1, 2},
			duties: []*ethpb.DutiesResponse_Duty{
				{
					ValidatorIndex:   1,
					CommitteesAtSlot: 1,
				},
				{
					ValidatorIndex:   2,
					CommitteesAtSlot: 2,
				},
			},
			expectedErrorMessage: arraySizeMismatchErrorMessage,
		},
		{
			name: "ValidatorIndices size mismatch",
			name: "duties size mismatch",
			subscribeRequest: &ethpb.CommitteeSubnetsSubscribeRequest{
				CommitteeIds: []primitives.CommitteeIndex{1, 2},
				Slots:        []primitives.Slot{1, 2},
				IsAggregator: []bool{false, true},
			},
			validatorIndices: []primitives.ValidatorIndex{1},
			duties: []*ethpb.DutiesResponse_Duty{
				{
					ValidatorIndex:   1,
					CommitteesAtSlot: 1,
				},
			},
			expectedErrorMessage: arraySizeMismatchErrorMessage,
		},
		{
			name: "bad duties query",
			subscribeRequest: &ethpb.CommitteeSubnetsSubscribeRequest{
				Slots:        []primitives.Slot{1},
				CommitteeIds: []primitives.CommitteeIndex{2},
				IsAggregator: []bool{false},
			},
			validatorIndices:     []primitives.ValidatorIndex{3},
			dutiesError:          errors.New("foo error"),
			expectGetDutiesQuery: true,
			expectedErrorMessage: "failed to get duties for epoch `0`: foo error",
		},
		{
			name: "bad duty slot",
			subscribeRequest: &ethpb.CommitteeSubnetsSubscribeRequest{
				Slots:        []primitives.Slot{1},
				CommitteeIds: []primitives.CommitteeIndex{2},
				IsAggregator: []bool{false},
			},
			validatorIndices: []primitives.ValidatorIndex{3},
			attesterDuty: &structs.AttesterDuty{
				Slot:             "foo",
				CommitteesAtSlot: "1",
			},
			expectGetDutiesQuery: true,
			expectedErrorMessage: "failed to parse slot `foo`",
		},
		{
			name: "bad duty committees at slot",
			subscribeRequest: &ethpb.CommitteeSubnetsSubscribeRequest{
				Slots:        []primitives.Slot{1},
				CommitteeIds: []primitives.CommitteeIndex{2},
				IsAggregator: []bool{false},
			},
			validatorIndices: []primitives.ValidatorIndex{3},
			attesterDuty: &structs.AttesterDuty{
				Slot:             "1",
				CommitteesAtSlot: "foo",
			},
			expectGetDutiesQuery: true,
			expectedErrorMessage: "failed to parse CommitteesAtSlot `foo`",
		},
		{
			name: "missing slot in duties",
			subscribeRequest: &ethpb.CommitteeSubnetsSubscribeRequest{
				Slots:        []primitives.Slot{1},
				CommitteeIds: []primitives.CommitteeIndex{2},
				IsAggregator: []bool{false},
			},
			validatorIndices: []primitives.ValidatorIndex{3},
			attesterDuty: &structs.AttesterDuty{
				Slot:             "2",
				CommitteesAtSlot: "3",
			},
			expectGetDutiesQuery: true,
			expectedErrorMessage: "failed to get committees for slot `1`",
		},
		{
			name: "bad POST request",
			subscribeRequest: &ethpb.CommitteeSubnetsSubscribeRequest{
@@ -239,12 +188,12 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
				CommitteeIds: []primitives.CommitteeIndex{2},
				IsAggregator: []bool{false},
			},
			validatorIndices: []primitives.ValidatorIndex{3},
			attesterDuty: &structs.AttesterDuty{
				Slot:             "1",
				CommitteesAtSlot: "2",
			duties: []*ethpb.DutiesResponse_Duty{
				{
					ValidatorIndex:   1,
					CommitteesAtSlot: 1,
				},
			},
			expectGetDutiesQuery:    true,
			expectSubscribeRestCall: true,
			expectedErrorMessage:    "foo error",
		},
@@ -257,18 +206,6 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {

			ctx := context.Background()

			dutiesProvider := mock.NewMockdutiesProvider(ctrl)
			if testCase.expectGetDutiesQuery {
				dutiesProvider.EXPECT().GetAttesterDuties(
					ctx,
					gomock.Any(),
					gomock.Any(),
				).Return(
					[]*structs.AttesterDuty{testCase.attesterDuty},
					testCase.dutiesError,
				).Times(1)
			}

			jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
			if testCase.expectSubscribeRestCall {
				jsonRestHandler.EXPECT().Post(
@@ -284,9 +221,8 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {

			validatorClient := &beaconApiValidatorClient{
				jsonRestHandler: jsonRestHandler,
				dutiesProvider:  dutiesProvider,
			}
			err := validatorClient.subscribeCommitteeSubnets(ctx, testCase.subscribeRequest, testCase.validatorIndices)
			err := validatorClient.subscribeCommitteeSubnets(ctx, testCase.subscribeRequest, testCase.duties)
			assert.ErrorContains(t, testCase.expectedErrorMessage, err)
		})
	}

@@ -5,7 +5,6 @@ import (

	"github.com/golang/protobuf/ptypes/empty"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/validator/client/iface"
	"google.golang.org/grpc"
@@ -99,7 +98,7 @@ func (c *grpcValidatorClient) SubmitValidatorRegistrations(ctx context.Context,
	return c.beaconNodeValidatorClient.SubmitValidatorRegistrations(ctx, in)
}

func (c *grpcValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex) (*empty.Empty, error) {
func (c *grpcValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.DutiesResponse_Duty) (*empty.Empty, error) {
	return c.beaconNodeValidatorClient.SubscribeCommitteeSubnets(ctx, in)
}

@@ -137,7 +137,7 @@ type ValidatorClient interface {
	SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest) (*ethpb.AggregateSelectionResponse, error)
	SubmitSignedAggregateSelectionProof(ctx context.Context, in *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error)
	ProposeExit(ctx context.Context, in *ethpb.SignedVoluntaryExit) (*ethpb.ProposeExitResponse, error)
	SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, validatorIndices []primitives.ValidatorIndex) (*empty.Empty, error)
	SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, duties []*ethpb.DutiesResponse_Duty) (*empty.Empty, error)
	CheckDoppelGanger(ctx context.Context, in *ethpb.DoppelGangerRequest) (*ethpb.DoppelGangerResponse, error)
	GetSyncMessageBlockRoot(ctx context.Context, in *empty.Empty) (*ethpb.SyncMessageBlockRootResponse, error)
	SubmitSyncMessage(ctx context.Context, in *ethpb.SyncCommitteeMessage) (*empty.Empty, error)

@@ -629,7 +629,7 @@ func (v *validator) subscribeToSubnets(ctx context.Context, res *ethpb.DutiesRes
	subscribeSlots := make([]primitives.Slot, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
	subscribeCommitteeIndices := make([]primitives.CommitteeIndex, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
	subscribeIsAggregator := make([]bool, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
	subscribeValidatorIndices := make([]primitives.ValidatorIndex, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
	activeDuties := make([]*ethpb.DutiesResponse_Duty, 0, len(res.CurrentEpochDuties)+len(res.NextEpochDuties))
	alreadySubscribed := make(map[[64]byte]bool)

	if v.distributed {
@@ -662,7 +662,7 @@ func (v *validator) subscribeToSubnets(ctx context.Context, res *ethpb.DutiesRes
			subscribeSlots = append(subscribeSlots, attesterSlot)
			subscribeCommitteeIndices = append(subscribeCommitteeIndices, committeeIndex)
			subscribeIsAggregator = append(subscribeIsAggregator, aggregator)
			subscribeValidatorIndices = append(subscribeValidatorIndices, validatorIndex)
			activeDuties = append(activeDuties, duty)
		}
	}

@@ -688,7 +688,7 @@ func (v *validator) subscribeToSubnets(ctx context.Context, res *ethpb.DutiesRes
			subscribeSlots = append(subscribeSlots, attesterSlot)
			subscribeCommitteeIndices = append(subscribeCommitteeIndices, committeeIndex)
			subscribeIsAggregator = append(subscribeIsAggregator, aggregator)
			subscribeValidatorIndices = append(subscribeValidatorIndices, validatorIndex)
			activeDuties = append(activeDuties, duty)
		}
	}

@@ -698,7 +698,7 @@ func (v *validator) subscribeToSubnets(ctx context.Context, res *ethpb.DutiesRes
			CommitteeIds: subscribeCommitteeIndices,
			IsAggregator: subscribeIsAggregator,
		},
		subscribeValidatorIndices,
		activeDuties,
	)

	return err

@@ -520,7 +520,7 @@ func TestUpdateDuties_OK(t *testing.T) {
		gomock.Any(),
		gomock.Any(),
		gomock.Any(),
	).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex) (*emptypb.Empty, error) {
	).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.DutiesResponse_Duty) (*emptypb.Empty, error) {
		wg.Done()
		return nil, nil
	})
@@ -568,7 +568,7 @@ func TestUpdateDuties_OK_FilterBlacklistedPublicKeys(t *testing.T) {
		gomock.Any(),
		gomock.Any(),
		gomock.Any(),
	).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex) (*emptypb.Empty, error) {
	).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.DutiesResponse_Duty) (*emptypb.Empty, error) {
		wg.Done()
		return nil, nil
	})
@@ -700,7 +700,7 @@ func TestUpdateDuties_Distributed(t *testing.T) {
		gomock.Any(),
		gomock.Any(),
		gomock.Any(),
	).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex) (*emptypb.Empty, error) {
	).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.DutiesResponse_Duty) (*emptypb.Empty, error) {
		wg.Done()
		return nil, nil
	})

@@ -124,13 +124,18 @@ func ConvertDatabase(ctx context.Context, sourceDataDir string, targetDataDir st
	// -----------------
	// Get the proposer settings.
	proposerSettings, err := sourceDatabase.ProposerSettings(ctx)
	if err != nil {
		return errors.Wrap(err, "could not get proposer settings from source database")
	}

	// Save the proposer settings.
	if err := targetDatabase.SaveProposerSettings(ctx, proposerSettings); err != nil {
		return errors.Wrap(err, "could not save proposer settings")
	switch err {
	case nil:
		// Save the proposer settings.
		if err := targetDatabase.SaveProposerSettings(ctx, proposerSettings); err != nil {
			return errors.Wrap(err, "could not save proposer settings")
		}

	case kv.ErrNoProposerSettingsFound, filesystem.ErrNoProposerSettingsFound:
		// Nothing to do.
	default:
		return errors.Wrap(err, "could not get proposer settings from source database")
	}
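
The switch above turns the two "no settings found" sentinels into a benign skip instead of a hard failure. The same pattern in isolation (loadSettings and errNotFound are hypothetical placeholders, not part of this diff):

	settings, err := loadSettings() // assumed helper returning (*proposer.Settings, error)
	switch err {
	case nil:
		// settings exist; persist them to the target
		_ = settings
	case errNotFound:
		// absence is expected for fresh databases; nothing to do
	default:
		return errors.Wrap(err, "could not load settings")
	}

One caveat worth noting: a plain switch compares sentinel errors with ==, so this only holds while the kv and filesystem packages return their sentinels unwrapped; wrapped errors would require errors.Is instead.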

	// Attestations

@@ -49,217 +49,224 @@ func TestDB_ConvertDatabase(t *testing.T) {
	defaultFeeRecipient := getFeeRecipientFromString(t, defaultFeeRecipientString)
	customFeeRecipient := getFeeRecipientFromString(t, customFeeRecipientString)

	for _, minimalToComplete := range []bool{false, true} {
		t.Run(fmt.Sprintf("minimalToComplete=%v", minimalToComplete), func(t *testing.T) {
			// Create signing root
			signingRoot := [fieldparams.RootLength]byte{}
			var signingRootBytes []byte
			if minimalToComplete {
				signingRootBytes = signingRoot[:]
			}
	for _, minimalToComplete := range [...]bool{false, true} {
		for _, withProposerSettings := range [...]bool{false, true} {
			t.Run(fmt.Sprintf("minimalToComplete=%v", minimalToComplete), func(t *testing.T) {
				// Create signing root
				signingRoot := [fieldparams.RootLength]byte{}
				var signingRootBytes []byte
				if minimalToComplete {
					signingRootBytes = signingRoot[:]
				}

			// Create database directoriy path.
			datadir := t.TempDir()
				// Create database directoriy path.
				datadir := t.TempDir()

			// Run source DB preparation.
			// --------------------------
			// Create the source database.
			var (
				sourceDatabase, targetDatabase iface.ValidatorDB
				err                            error
			)
				// Run source DB preparation.
				// --------------------------
				// Create the source database.
				var (
					sourceDatabase, targetDatabase iface.ValidatorDB
					err                            error
				)

			if minimalToComplete {
				sourceDatabase, err = filesystem.NewStore(datadir, &filesystem.Config{
					PubKeys: [][fieldparams.BLSPubkeyLength]byte{pubkey1, pubkey2},
				})
			} else {
				sourceDatabase, err = kv.NewKVStore(ctx, datadir, &kv.Config{
					PubKeys: [][fieldparams.BLSPubkeyLength]byte{pubkey1, pubkey2},
				})
			}
				if minimalToComplete {
					sourceDatabase, err = filesystem.NewStore(datadir, &filesystem.Config{
						PubKeys: [][fieldparams.BLSPubkeyLength]byte{pubkey1, pubkey2},
					})
				} else {
					sourceDatabase, err = kv.NewKVStore(ctx, datadir, &kv.Config{
						PubKeys: [][fieldparams.BLSPubkeyLength]byte{pubkey1, pubkey2},
					})
				}

			require.NoError(t, err, "could not create source database")
				require.NoError(t, err, "could not create source database")

			// Save the genesis validator root.
			expectedGenesisValidatorRoot := []byte("genesis-validator-root")
			err = sourceDatabase.SaveGenesisValidatorsRoot(ctx, expectedGenesisValidatorRoot)
			require.NoError(t, err, "could not save genesis validator root")
				// Save the genesis validator root.
				expectedGenesisValidatorRoot := []byte("genesis-validator-root")
				err = sourceDatabase.SaveGenesisValidatorsRoot(ctx, expectedGenesisValidatorRoot)
				require.NoError(t, err, "could not save genesis validator root")

			// Save the graffiti file hash.
			// (Getting the graffiti ordered index will set the graffiti file hash)
			expectedGraffitiFileHash := [32]byte{1}
			_, err = sourceDatabase.GraffitiOrderedIndex(ctx, expectedGraffitiFileHash)
			require.NoError(t, err, "could not get graffiti ordered index")
				// Save the graffiti file hash.
				// (Getting the graffiti ordered index will set the graffiti file hash)
				expectedGraffitiFileHash := [32]byte{1}
				_, err = sourceDatabase.GraffitiOrderedIndex(ctx, expectedGraffitiFileHash)
				require.NoError(t, err, "could not get graffiti ordered index")

			// Save the graffiti ordered index.
			expectedGraffitiOrderedIndex := uint64(1)
			err = sourceDatabase.SaveGraffitiOrderedIndex(ctx, expectedGraffitiOrderedIndex)
			require.NoError(t, err, "could not save graffiti ordered index")
				// Save the graffiti ordered index.
				expectedGraffitiOrderedIndex := uint64(1)
				err = sourceDatabase.SaveGraffitiOrderedIndex(ctx, expectedGraffitiOrderedIndex)
				require.NoError(t, err, "could not save graffiti ordered index")

			// Save the proposer settings.
			var relays []string = nil
				// Save the proposer settings.
				var relays []string = nil
				expectedProposerSettings := &proposer.Settings{}

			expectedProposerSettings := &proposer.Settings{
				ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*proposer.Option{
					pubkey1: {
						FeeRecipientConfig: &proposer.FeeRecipientConfig{
							FeeRecipient: customFeeRecipient,
				if withProposerSettings {
					expectedProposerSettings = &proposer.Settings{
						ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*proposer.Option{
							pubkey1: {
								FeeRecipientConfig: &proposer.FeeRecipientConfig{
									FeeRecipient: customFeeRecipient,
								},
								BuilderConfig: &proposer.BuilderConfig{
									Enabled:  true,
									GasLimit: 42,
									Relays:   relays,
								},
							},
						},
						BuilderConfig: &proposer.BuilderConfig{
							Enabled:  true,
							GasLimit: 42,
							Relays:   relays,
						DefaultConfig: &proposer.Option{
							FeeRecipientConfig: &proposer.FeeRecipientConfig{
								FeeRecipient: defaultFeeRecipient,
							},
							BuilderConfig: &proposer.BuilderConfig{
								Enabled:  false,
								GasLimit: 43,
								Relays:   relays,
							},
						},
					}

					err = sourceDatabase.SaveProposerSettings(ctx, expectedProposerSettings)
					require.NoError(t, err, "could not save proposer settings")
				}
// Save some attestations.
|
||||
completeAttestations := []*ethpb.IndexedAttestation{
|
||||
{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
DefaultConfig: &proposer.Option{
|
||||
FeeRecipientConfig: &proposer.FeeRecipientConfig{
|
||||
FeeRecipient: defaultFeeRecipient,
|
||||
},
|
||||
BuilderConfig: &proposer.BuilderConfig{
|
||||
Enabled: false,
|
||||
GasLimit: 43,
|
||||
Relays: relays,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err = sourceDatabase.SaveProposerSettings(ctx, expectedProposerSettings)
|
||||
require.NoError(t, err, "could not save proposer settings")
|
||||
|
||||
// Save some attestations.
|
||||
completeAttestations := []*ethpb.IndexedAttestation{
|
||||
{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 2,
|
||||
{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 2,
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 3,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 2,
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 3,
|
||||
},
|
||||
}
|
||||
|
||||
expectedAttestationRecords1 := []*common.AttestationRecord{
|
||||
{
|
||||
PubKey: pubkey1,
|
||||
Source: primitives.Epoch(2),
|
||||
Target: primitives.Epoch(3),
|
||||
SigningRoot: signingRootBytes,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
expectedAttestationRecords1 := []*common.AttestationRecord{
|
||||
{
|
||||
PubKey: pubkey1,
|
||||
Source: primitives.Epoch(2),
|
||||
Target: primitives.Epoch(3),
|
||||
SigningRoot: signingRootBytes,
|
||||
},
|
||||
}
|
||||
expectedAttestationRecords2 := []*common.AttestationRecord{
|
||||
{
|
||||
PubKey: pubkey2,
|
||||
Source: primitives.Epoch(2),
|
||||
Target: primitives.Epoch(3),
|
||||
SigningRoot: signingRootBytes,
|
||||
},
|
||||
}
|
||||
|
||||
expectedAttestationRecords2 := []*common.AttestationRecord{
|
||||
{
|
||||
PubKey: pubkey2,
|
||||
Source: primitives.Epoch(2),
|
||||
Target: primitives.Epoch(3),
|
||||
SigningRoot: signingRootBytes,
|
||||
},
|
||||
}
|
||||
err = sourceDatabase.SaveAttestationsForPubKey(ctx, pubkey1, [][]byte{{1}, {2}}, completeAttestations)
|
||||
require.NoError(t, err, "could not save attestations")
|
||||
|
||||
err = sourceDatabase.SaveAttestationsForPubKey(ctx, pubkey1, [][]byte{{1}, {2}}, completeAttestations)
|
||||
require.NoError(t, err, "could not save attestations")
|
||||
err = sourceDatabase.SaveAttestationsForPubKey(ctx, pubkey2, [][]byte{{1}, {2}}, completeAttestations)
|
||||
require.NoError(t, err, "could not save attestations")
|
||||
|
||||
err = sourceDatabase.SaveAttestationsForPubKey(ctx, pubkey2, [][]byte{{1}, {2}}, completeAttestations)
|
||||
require.NoError(t, err, "could not save attestations")
|
||||
// Save some block proposals.
|
||||
err = sourceDatabase.SaveProposalHistoryForSlot(ctx, pubkey1, 42, []byte{})
|
||||
require.NoError(t, err, "could not save block proposal")
|
||||
|
||||
// Save some block proposals.
|
||||
err = sourceDatabase.SaveProposalHistoryForSlot(ctx, pubkey1, 42, []byte{})
|
||||
require.NoError(t, err, "could not save block proposal")
|
||||
err = sourceDatabase.SaveProposalHistoryForSlot(ctx, pubkey1, 43, []byte{})
|
||||
require.NoError(t, err, "could not save block proposal")
|
||||
|
||||
err = sourceDatabase.SaveProposalHistoryForSlot(ctx, pubkey1, 43, []byte{})
|
||||
require.NoError(t, err, "could not save block proposal")
|
||||
expectedProposals := []*common.Proposal{
|
||||
{
|
||||
Slot: 43,
|
||||
SigningRoot: signingRootBytes,
|
||||
},
|
||||
}
|
||||
|
||||
expectedProposals := []*common.Proposal{
|
||||
{
|
||||
Slot: 43,
|
||||
SigningRoot: signingRootBytes,
|
||||
},
|
||||
}
|
||||
// Close the source database.
|
||||
err = sourceDatabase.Close()
|
||||
require.NoError(t, err, "could not close source database")
|
||||
|
||||
// Close the source database.
|
||||
err = sourceDatabase.Close()
|
||||
require.NoError(t, err, "could not close source database")
|
||||
// Source to target DB conversion.
|
||||
// -------------------------------
|
||||
err = ConvertDatabase(ctx, datadir, datadir, minimalToComplete)
|
||||
require.NoError(t, err, "could not convert source to target database")
|
||||
|
||||
// Source to target DB conversion.
|
||||
// ----------------------------------------
|
||||
err = ConvertDatabase(ctx, datadir, datadir, minimalToComplete)
|
||||
require.NoError(t, err, "could not convert source to target database")
|
||||
// Check the target database.
|
||||
// --------------------------
|
||||
if minimalToComplete {
|
||||
targetDatabase, err = kv.NewKVStore(ctx, datadir, nil)
|
||||
} else {
|
||||
targetDatabase, err = filesystem.NewStore(datadir, nil)
|
||||
}
|
||||
require.NoError(t, err, "could not get minimal database")
|
||||
|
||||
// Check the target database.
|
||||
// --------------------------
|
||||
if minimalToComplete {
|
||||
targetDatabase, err = kv.NewKVStore(ctx, datadir, nil)
|
||||
} else {
|
||||
targetDatabase, err = filesystem.NewStore(datadir, nil)
|
||||
}
|
||||
require.NoError(t, err, "could not get minimal database")
|
||||
// Check the genesis validator root.
|
||||
actualGenesisValidatoRoot, err := targetDatabase.GenesisValidatorsRoot(ctx)
|
||||
require.NoError(t, err, "could not get genesis validator root from target database")
|
||||
require.DeepSSZEqual(t, expectedGenesisValidatorRoot, actualGenesisValidatoRoot, "genesis validator root should match")
|
||||
|
||||
// Check the genesis validator root.
|
||||
actualGenesisValidatoRoot, err := targetDatabase.GenesisValidatorsRoot(ctx)
|
||||
require.NoError(t, err, "could not get genesis validator root from target database")
|
||||
require.DeepSSZEqual(t, expectedGenesisValidatorRoot, actualGenesisValidatoRoot, "genesis validator root should match")
|
||||
// Check the graffiti file hash.
|
||||
actualGraffitiFileHash, exists, err := targetDatabase.GraffitiFileHash()
|
||||
require.NoError(t, err, "could not get graffiti file hash from target database")
|
||||
require.Equal(t, true, exists, "graffiti file hash should exist")
|
||||
require.Equal(t, expectedGraffitiFileHash, actualGraffitiFileHash, "graffiti file hash should match")
|
||||
|
||||
// Check the graffiti file hash.
|
||||
actualGraffitiFileHash, exists, err := targetDatabase.GraffitiFileHash()
|
||||
require.NoError(t, err, "could not get graffiti file hash from target database")
|
||||
require.Equal(t, true, exists, "graffiti file hash should exist")
|
||||
require.Equal(t, expectedGraffitiFileHash, actualGraffitiFileHash, "graffiti file hash should match")
|
||||
// Check the graffiti ordered index.
|
||||
actualGraffitiOrderedIndex, err := targetDatabase.GraffitiOrderedIndex(ctx, expectedGraffitiFileHash)
|
||||
require.NoError(t, err, "could not get graffiti ordered index from target database")
|
||||
require.Equal(t, expectedGraffitiOrderedIndex, actualGraffitiOrderedIndex, "graffiti ordered index should match")
|
||||
|
||||
// Check the graffiti ordered index.
|
||||
actualGraffitiOrderedIndex, err := targetDatabase.GraffitiOrderedIndex(ctx, expectedGraffitiFileHash)
|
||||
require.NoError(t, err, "could not get graffiti ordered index from target database")
|
||||
require.Equal(t, expectedGraffitiOrderedIndex, actualGraffitiOrderedIndex, "graffiti ordered index should match")
|
||||
if withProposerSettings {
|
||||
// Check the proposer settings.
|
||||
actualProposerSettings, err := targetDatabase.ProposerSettings(ctx)
|
||||
require.NoError(t, err, "could not get proposer settings from target database")
|
||||
require.DeepEqual(t, expectedProposerSettings, actualProposerSettings, "proposer settings should match")
|
||||
}
|
||||
|
||||
// Check the proposer settings.
|
||||
actualProposerSettings, err := targetDatabase.ProposerSettings(ctx)
|
||||
require.NoError(t, err, "could not get proposer settings from target database")
|
||||
require.DeepEqual(t, expectedProposerSettings, actualProposerSettings, "proposer settings should match")
|
||||
// Check the attestations.
|
||||
actualAttestationRecords, err := targetDatabase.AttestationHistoryForPubKey(ctx, pubkey1)
|
||||
require.NoError(t, err, "could not get attestations from target database")
|
||||
require.DeepEqual(t, expectedAttestationRecords1, actualAttestationRecords, "attestations should match")
|
||||
|
||||
// Check the attestations.
|
||||
actualAttestationRecords, err := targetDatabase.AttestationHistoryForPubKey(ctx, pubkey1)
|
||||
require.NoError(t, err, "could not get attestations from target database")
|
||||
require.DeepEqual(t, expectedAttestationRecords1, actualAttestationRecords, "attestations should match")
|
||||
actualAttestationRecords, err = targetDatabase.AttestationHistoryForPubKey(ctx, pubkey2)
|
||||
require.NoError(t, err, "could not get attestations from target database")
|
||||
require.DeepEqual(t, expectedAttestationRecords2, actualAttestationRecords, "attestations should match")
|
||||
|
||||
actualAttestationRecords, err = targetDatabase.AttestationHistoryForPubKey(ctx, pubkey2)
|
||||
require.NoError(t, err, "could not get attestations from target database")
|
||||
require.DeepEqual(t, expectedAttestationRecords2, actualAttestationRecords, "attestations should match")
|
||||
// Check the block proposals.
|
||||
actualProposals, err := targetDatabase.ProposalHistoryForPubKey(ctx, pubkey1)
|
||||
require.NoError(t, err, "could not get block proposals from target database")
|
||||
require.DeepEqual(t, expectedProposals, actualProposals, "block proposals should match")
|
||||
|
||||
// Check the block proposals.
|
||||
actualProposals, err := targetDatabase.ProposalHistoryForPubKey(ctx, pubkey1)
|
||||
require.NoError(t, err, "could not get block proposals from target database")
|
||||
require.DeepEqual(t, expectedProposals, actualProposals, "block proposals should match")
|
||||
// Close the target database.
|
||||
err = targetDatabase.Close()
|
||||
require.NoError(t, err, "could not close target database")
|
||||
|
||||
// Close the target database.
|
||||
err = targetDatabase.Close()
|
||||
require.NoError(t, err, "could not close target database")
|
||||
// Check the source database does not exist anymore.
|
||||
var existing bool
|
||||
|
||||
// Check the source database does not exist anymore.
|
||||
var existing bool
|
||||
if minimalToComplete {
|
||||
databasePath := filepath.Join(datadir, filesystem.DatabaseDirName)
|
||||
existing, err = file.Exists(databasePath, file.Directory)
|
||||
} else {
|
||||
databasePath := filepath.Join(datadir, kv.ProtectionDbFileName)
|
||||
existing, err = file.Exists(databasePath, file.Regular)
|
||||
}
|
||||
|
||||
if minimalToComplete {
|
||||
databasePath := filepath.Join(datadir, filesystem.DatabaseDirName)
|
||||
existing, err = file.Exists(databasePath, file.Directory)
|
||||
} else {
|
||||
databasePath := filepath.Join(datadir, kv.ProtectionDbFileName)
|
||||
existing, err = file.Exists(databasePath, file.Regular)
|
||||
}
|
||||
|
||||
require.NoError(t, err, "could not check if source database exists")
|
||||
require.Equal(t, false, existing, "source database should not exist")
|
||||
})
|
||||
require.NoError(t, err, "could not check if source database exists")
|
||||
require.Equal(t, false, existing, "source database should not exist")
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
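The second range loop is the substance of this hunk: the conversion round-trip now runs as a 2x2 matrix over minimalToComplete and withProposerSettings, so ConvertDatabase is exercised both with and without proposer settings present in the source database. A minimal sketch of the same nested-matrix subtest pattern, with illustrative names rather than the repo's helpers:

package convert_test

import (
	"fmt"
	"testing"
)

// TestMatrix runs one subtest per combination of the two flags, mirroring
// the structure of TestDB_ConvertDatabase above.
func TestMatrix(t *testing.T) {
	for _, minimalToComplete := range [...]bool{false, true} {
		for _, withProposerSettings := range [...]bool{false, true} {
			name := fmt.Sprintf("minimalToComplete=%v/withProposerSettings=%v",
				minimalToComplete, withProposerSettings)
			t.Run(name, func(t *testing.T) {
				// Arrange a source DB (optionally with proposer settings),
				// convert it, then assert on the target DB; elided here.
				_, _ = minimalToComplete, withProposerSettings
			})
		}
	}
}

One caveat worth noting: the hunk keeps only minimalToComplete in the subtest name, so the two withProposerSettings runs share a name and Go's testing package disambiguates them by appending a "#01" suffix; putting both flags in the name, as in this sketch, keeps the runs distinguishable in test output.
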
@@ -74,13 +74,13 @@ type KeyStatus struct {
type KeyStatusType string

const (
StatusImported KeyStatusType = "IMPORTED"
StatusError KeyStatusType = "ERROR"
StatusDuplicate KeyStatusType = "DUPLICATE"
StatusUnknown KeyStatusType = "UNKNOWN"
StatusNotFound KeyStatusType = "NOT_FOUND"
StatusDeleted KeyStatusType = "DELETED"
StatusNotActive KeyStatusType = "NOT_ACTIVE"
StatusImported KeyStatusType = "imported"
StatusError KeyStatusType = "error"
StatusDuplicate KeyStatusType = "duplicate"
StatusUnknown KeyStatusType = "unknown"
StatusNotFound KeyStatusType = "not_found"
StatusDeleted KeyStatusType = "deleted"
StatusNotActive KeyStatusType = "not_active"
)

// PublicKeyDeleter allows deleting public keys set in keymanager.

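The status strings move from uppercase to lowercase, which lines up with the lowercase statuses ("imported", "error", "duplicate", and so on) used by the Ethereum keymanager-APIs specification that these endpoints implement. Any caller still comparing against the old uppercase literals will fail an exact match after this change; a case-insensitive comparison tolerates both forms. A small self-contained sketch (the type and constant are reproduced from the hunk; the helper is illustrative, not part of Prysm):

package keymanager_test

import (
	"strings"
	"testing"
)

type KeyStatusType string

const StatusImported KeyStatusType = "imported"

// statusEqual compares a received status against an expected constant
// without caring whether the sender used "IMPORTED" or "imported".
func statusEqual(got string, want KeyStatusType) bool {
	return strings.EqualFold(got, string(want))
}

func TestStatusEqual(t *testing.T) {
	for _, got := range []string{"imported", "IMPORTED", "Imported"} {
		if !statusEqual(got, StatusImported) {
			t.Errorf("expected %q to match %q", got, StatusImported)
		}
	}
}
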
@@ -30,6 +30,7 @@ go_library(
"//async/event:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//cmd:go_default_library",
"//cmd/validator/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",

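The added //cmd/validator/flags entry pairs with the new Go import in the next hunk: under Bazel, every new package import needs a matching deps entry in the go_library rule. Entries like this are normally generated rather than hand-edited; assuming Prysm's usual Gazelle setup, something like

bazel run //:gazelle

regenerates the BUILD deps from the Go import graph (the exact target name is the repo's convention, not something this diff guarantees).
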
@@ -13,6 +13,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v5/cmd/validator/flags"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/config/proposer"

@@ -479,7 +480,7 @@ func (s *Server) ImportRemoteKeys(w http.ResponseWriter, r *http.Request) {
}
}
if isUrlUsed {
log.Warnf("Setting web3signer base url for IMPORTED keys is not supported. Prysm only uses the url from --validators-external-signer-url flag for web3signerKeymanagerKind.")
log.Warnf("Setting the remote signer base url within the request is not supported. The remote signer url can only be set from the --%s flag.", flags.Web3SignerURLFlag.Name)
}

httputil.WriteJson(w, &RemoteKeysResponse{Data: adder.AddPublicKeys(remoteKeys)})

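The rewritten warning derives the flag name from the flag definition itself instead of hard-coding it, so the log text cannot drift if the CLI flag is ever renamed. A minimal sketch of the same technique with urfave/cli, which Prysm uses for its flags (the flag definition below is a stand-in, not the repo's actual flags.Web3SignerURLFlag):

package main

import (
	"log"

	"github.com/urfave/cli/v2"
)

// web3SignerURLFlag stands in for a real CLI flag definition.
var web3SignerURLFlag = &cli.StringFlag{
	Name: "validators-external-signer-url",
}

func main() {
	// Interpolating flag.Name keeps the message in sync with the CLI.
	log.Printf("The remote signer url can only be set from the --%s flag.", web3SignerURLFlag.Name)
}
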
@@ -7,6 +7,7 @@ import (
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"

@@ -204,7 +205,7 @@ func TestServer_ImportKeystores(t *testing.T) {
resp := &ImportKeystoresResponse{}
require.NoError(t, json.Unmarshal(wr.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
require.Equal(t, keymanager.StatusError, resp.Data[0].Status)
require.Equal(t, fmt.Sprintf("%v", keymanager.StatusError), strings.ToLower(string(resp.Data[0].Status))) // make sure it's lower case
})
t.Run("200 response even if number of passwords does not match number of keystores", func(t *testing.T) {
request := &ImportKeystoresRequest{

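A note on the assertion style in this hunk: for a defined string type like KeyStatusType with no custom Stringer, fmt.Sprintf("%v", status) is identical to string(status), so the check boils down to comparing lowercased strings. A tiny illustration with stand-in types, not Prysm imports:

package main

import (
	"fmt"
	"strings"
)

type KeyStatusType string

const StatusError KeyStatusType = "error"

func main() {
	got := KeyStatusType("ERROR") // e.g. a legacy uppercase response
	// %v on a plain string-typed value prints the underlying string.
	fmt.Println(fmt.Sprintf("%v", StatusError) == string(StatusError)) // true
	// The test's lower-case check, reduced to its essence:
	fmt.Println(strings.ToLower(string(got)) == string(StatusError)) // true
}
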
@@ -1406,6 +1407,7 @@ func TestServer_ImportRemoteKeys(t *testing.T) {
require.NoError(t, json.Unmarshal(w.Body.Bytes(), resp))
for i := 0; i < len(resp.Data); i++ {
require.DeepEqual(t, expectedStatuses[i], resp.Data[i])
require.Equal(t, fmt.Sprintf("%v", expectedStatuses[i].Status), strings.ToLower(string(resp.Data[i].Status)))
}
})
}