Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 13:58:09 -05:00

Compare commits: focil...fast-confi (4 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 89053cdb70 | |
| | 5a95b44107 | |
| | ebe9ec6014 | |
| | 3ce627a296 | |
@@ -133,66 +133,6 @@ func SignedBLSChangesFromConsensus(src []*eth.SignedBLSToExecutionChange) []*Sig
    return changes
}

func SignedInclusionListFromConsensus(src *eth.SignedInclusionList) *SignedInclusionList {
    transactions := make([]string, len(src.Message.Transactions))
    for i, transaction := range src.Message.Transactions {
        transactions[i] = hexutil.Encode(transaction)
    }

    return &SignedInclusionList{
        Message: &InclusionList{
            Slot:                       fmt.Sprintf("%d", src.Message.Slot),
            ValidatorIndex:             fmt.Sprintf("%d", src.Message.ValidatorIndex),
            InclusionListCommitteeRoot: hexutil.Encode(src.Message.InclusionListCommitteeRoot),
            Transactions:               transactions,
        },
        Signature: hexutil.Encode(src.Signature),
    }
}

func (s *SignedInclusionList) ToConsensus() (*eth.SignedInclusionList, error) {
    message, err := s.Message.ToConsensus()
    if err != nil {
        return nil, server.NewDecodeError(err, "Message")
    }
    signature, err := bytesutil.DecodeHexWithLength(s.Signature, fieldparams.BLSSignatureLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Signature")
    }
    return &eth.SignedInclusionList{
        Message:   message,
        Signature: signature,
    }, nil
}

func (s *InclusionList) ToConsensus() (*eth.InclusionList, error) {
    slot, err := strconv.ParseUint(s.Slot, 10, 64)
    if err != nil {
        return nil, server.NewDecodeError(err, "Slot")
    }
    validatorIndex, err := strconv.ParseUint(s.ValidatorIndex, 10, 64)
    if err != nil {
        return nil, server.NewDecodeError(err, "ValidatorIndex")
    }
    inclusionListCommitteeRoot, err := bytesutil.DecodeHexWithLength(s.InclusionListCommitteeRoot, fieldparams.RootLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "InclusionListCommitteeRoot")
    }
    transactions := make([][]byte, len(s.Transactions))
    for i, transaction := range s.Transactions {
        transactions[i], err = bytesutil.DecodeHexWithMaxLength(transaction, fieldparams.MaxBytesPerTxLength)
        if err != nil {
            return nil, server.NewDecodeError(err, fmt.Sprintf("Transactions[%d]", i))
        }
    }
    return &eth.InclusionList{
        Slot:                       primitives.Slot(slot),
        ValidatorIndex:             primitives.ValidatorIndex(validatorIndex),
        InclusionListCommitteeRoot: inclusionListCommitteeRoot,
        Transactions:               transactions,
    }, nil
}

func (s *Fork) ToConsensus() (*eth.Fork, error) {
    previousVersion, err := bytesutil.DecodeHexWithLength(s.PreviousVersion, 4)
    if err != nil {
@@ -31,6 +31,7 @@ type GetForkChoiceDumpResponse struct {
type ForkChoiceDumpExtraData struct {
    UnrealizedJustifiedCheckpoint *Checkpoint `json:"unrealized_justified_checkpoint"`
    UnrealizedFinalizedCheckpoint *Checkpoint `json:"unrealized_finalized_checkpoint"`
    SafeHeadRoot                  string      `json:"safe_head_root"`
    ProposerBoostRoot             string      `json:"proposer_boost_root"`
    PreviousProposerBoostRoot     string      `json:"previous_proposer_boost_root"`
    HeadRoot                      string      `json:"head_root"`
@@ -103,11 +103,6 @@ type BlobSidecarEvent struct {
    VersionedHash string `json:"versioned_hash"`
}

type InclusionListEvent struct {
    Version string               `json:"version"`
    Data    *SignedInclusionList `json:"data"`
}

type LightClientFinalityUpdateEvent struct {
    Version string                     `json:"version"`
    Data    *LightClientFinalityUpdate `json:"data"`
@@ -262,15 +262,3 @@ type PendingConsolidation struct {
    SourceIndex string `json:"source_index"`
    TargetIndex string `json:"target_index"`
}

type SignedInclusionList struct {
    Message   *InclusionList `json:"message"`
    Signature string         `json:"signature"`
}

type InclusionList struct {
    Slot                       string   `json:"slot"`
    ValidatorIndex             string   `json:"validator_index"`
    InclusionListCommitteeRoot string   `json:"inclusion_list_committee_root"`
    Transactions               []string `json:"transactions"`
}
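Taken together, the hunks above remove the FOCIL API surface: the `SignedInclusionList`/`InclusionList` JSON structs and their converters to the consensus types. As a hedged illustration of how that surface was meant to be consumed (the helper name and the `encoding/json` plumbing are mine, not part of the diff, and this assumes the same package imports as the converters):

```go
// Sketch only: decode an API JSON body into the consensus type via the removed structs.
// Real inputs need correctly sized hex strings, otherwise ToConsensus returns a decode error.
func parseSignedInclusionList(body []byte) (*eth.SignedInclusionList, error) {
	var s SignedInclusionList
	if err := json.Unmarshal(body, &s); err != nil {
		return nil, err
	}
	return s.ToConsensus()
}
```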
@@ -12,7 +12,6 @@ go_library(
        "forkchoice_update_execution.go",
        "head.go",
        "head_sync_committee_info.go",
        "inclusion_list.go",
        "init_sync_process_block.go",
        "log.go",
        "merge_ascii_art.go",
@@ -42,7 +42,7 @@ type ForkchoiceFetcher interface {
    CachedHeadRoot() [32]byte
    GetProposerHead() [32]byte
    SetForkChoiceGenesisTime(time.Time)
    GetAttesterHead() [32]byte
    SafeBlockHash() [32]byte
    UpdateHead(context.Context, primitives.Slot)
    HighestReceivedBlockSlot() primitives.Slot
    ReceivedBlocksLastEpoch() (uint64, error)
@@ -27,13 +27,6 @@ func (s *Service) GetProposerHead() [32]byte {
    return s.cfg.ForkChoiceStore.GetProposerHead()
}

// GetAttesterHead returns the corresponding value from forkchoice
func (s *Service) GetAttesterHead() [32]byte {
    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()
    return s.cfg.ForkChoiceStore.GetAttesterHead()
}

// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp time.Time) {
    s.cfg.ForkChoiceStore.Lock()

@@ -98,6 +91,12 @@ func (s *Service) UnrealizedJustifiedPayloadBlockHash() [32]byte {
    return s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
}

func (s *Service) SafeBlockHash() [32]byte {
    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()
    return s.cfg.ForkChoiceStore.SafeBlockHash()
}

// FinalizedBlockHash returns finalized payload block hash from forkchoice.
func (s *Service) FinalizedBlockHash() [32]byte {
    s.cfg.ForkChoiceStore.RLock()
@@ -63,10 +63,10 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
        return nil, nil
    }
    finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
    justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
    safeBlockHash := s.cfg.ForkChoiceStore.SafeBlockHash()
    fcs := &enginev1.ForkchoiceState{
        HeadBlockHash:      headPayload.BlockHash(),
        SafeBlockHash:      justifiedHash[:],
        SafeBlockHash:      safeBlockHash[:],
        FinalizedBlockHash: finalizedHash[:],
    }
    if len(fcs.HeadBlockHash) != 32 || [32]byte(fcs.HeadBlockHash) == [32]byte{} {
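The hunk above is the engine-API side of the fast-confirmation change: the `safe` field sent to the execution client now comes from `ForkChoiceStore.SafeBlockHash()` instead of the unrealized justified payload hash. A minimal sketch of that wiring, assuming a `*Service` configured as in this file (the helper name and the `headHash`/`finalizedHash` parameters are illustrative, not from the diff):

```go
// Sketch only: how the new safe hash feeds engine_forkchoiceUpdated.
func buildForkchoiceState(s *Service, headHash, finalizedHash []byte) *enginev1.ForkchoiceState {
	safe := s.cfg.ForkChoiceStore.SafeBlockHash() // justified, fast-confirmation, or unrealized-justified hash
	return &enginev1.ForkchoiceState{
		HeadBlockHash:      headHash,
		SafeBlockHash:      safe[:],
		FinalizedBlockHash: finalizedHash,
	}
}
```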
@@ -267,25 +267,12 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
            return false, errors.New("nil execution requests")
        }
    }

    var txs [][]byte
    // Post-FOCIL, only consider the inclusion list constraint if it matches the current slot.
    if slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().Eip7805ForkEpoch && s.CurrentSlot() == blk.Block().Slot() {
        txs = s.inclusionListCache.Get(blk.Block().Slot() - 1)
    }
    lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests, txs)
    lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)

    switch {
    case err == nil:
        newPayloadValidNodeCount.Inc()
        return true, nil
    case errors.Is(err, execution.ErrBadInclusionListPayloadStatus):
        log.WithFields(logrus.Fields{
            "slot":       blk.Block().Slot(),
            "parentRoot": fmt.Sprintf("%#x", parentRoot),
        }).Info("Called new payload but inclusion list didn't satisfy")
        blk.Block().MarkInclusionListNotSatisfied() // Cache the block root that fails to satisfy the inclusion list constraint.
        return true, nil
    case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
        newPayloadOptimisticNodeCount.Inc()
        log.WithFields(logrus.Fields{
@@ -1,72 +0,0 @@
package blockchain

import (
    "context"
    "fmt"
    "time"

    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/sirupsen/logrus"
)

const updateInclusionListBlockInterval = time.Second

// Routine that updates block building with inclusion lists one second before the slot starts.
func (s *Service) updateBlockWithInclusionListRoutine() {
    if err := s.waitForSync(); err != nil {
        log.WithError(err).Error("Failed to wait for initial sync")
        return
    }

    interval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - updateInclusionListBlockInterval
    ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{interval})

    for {
        select {
        case <-s.ctx.Done():
            return
        case <-ticker.C():
            s.updateBlockWithInclusionList(context.Background())
        }
    }
}

// Updates block building with inclusion lists, the current payload ID, and the new upload ID.
func (s *Service) updateBlockWithInclusionList(ctx context.Context) {
    currentSlot := s.CurrentSlot()

    // Skip update if not in or past the FOCIL fork epoch.
    if slots.ToEpoch(currentSlot) < params.BeaconConfig().Eip7805ForkEpoch {
        return
    }

    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()

    headRoot := s.headRoot()
    id, found := s.cfg.PayloadIDCache.PayloadID(currentSlot+1, headRoot)
    if !found {
        return
    }

    txs := s.inclusionListCache.Get(currentSlot)
    if len(txs) == 0 {
        log.WithField("slot", currentSlot).Warn("No inclusion list transactions found to update block")
        return
    }

    newID, err := s.cfg.ExecutionEngineCaller.UpdatePayloadWithInclusionList(ctx, id, txs)
    if err != nil {
        log.WithError(err).Error("Failed to update block with inclusion list")
        return
    }

    log.WithFields(logrus.Fields{
        "slot":     currentSlot,
        "headRoot": fmt.Sprintf("%x", headRoot),
        "txs":      len(txs),
    }).Info("Updated block with inclusion list")

    s.cfg.PayloadIDCache.Set(currentSlot+1, headRoot, *newID)
}
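For readers skimming the removed routine above: the tick offset is derived from the slot length, so the payload update fires one second before the next slot boundary. A tiny standalone illustration of that arithmetic (mainnet's 12-second slots are an assumption here, not read from the diff):

```go
package main

import (
	"fmt"
	"time"
)

// offsetIntoSlot mirrors the interval computed in updateBlockWithInclusionListRoutine:
// (SecondsPerSlot * 1s) - 1s, i.e. the tick fires one second before the next slot starts.
func offsetIntoSlot(secondsPerSlot uint64) time.Duration {
	const updateInclusionListBlockInterval = time.Second
	return time.Duration(secondsPerSlot)*time.Second - updateInclusionListBlockInterval
}

func main() {
	fmt.Println(offsetIntoSlot(12)) // 11s into a 12s slot
}
```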
@@ -275,10 +275,3 @@ func WithStartWaitingDataColumnSidecars(c chan bool) Option {
        return nil
    }
}

func WithInclusionListCache(c *cache.InclusionLists) Option {
    return func(s *Service) error {
        s.inclusionListCache = c
        return nil
    }
}
@@ -108,14 +108,6 @@ func (s *Service) spawnProcessAttestationsRoutine() {
            s.cfg.ForkChoiceStore.Unlock()

            s.UpdateHead(s.ctx, slotInterval.Slot)

            // Prune inclusion list that's more than 1 epoch old.
            // Mean at the second 0 of slot 100, we prune the inclusion list of slot 98.
            cachedSlot := primitives.Slot(0)
            if slotInterval.Slot > 2 {
                cachedSlot = slotInterval.Slot - 2
            }
            s.inclusionListCache.Delete(cachedSlot)
        }
    }
}
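The pruning comment above is easiest to read with concrete numbers: the cache entry that gets deleted is always two slots behind the slot that just started, clamped at zero for the earliest slots. A small self-contained sketch of that arithmetic (not part of the diff):

```go
package main

import "fmt"

// pruneTarget mirrors the guard in spawnProcessAttestationsRoutine: prune the
// inclusion lists stored two slots back, and fall back to slot 0 early on.
func pruneTarget(slot uint64) uint64 {
	if slot > 2 {
		return slot - 2
	}
	return 0
}

func main() {
	fmt.Println(pruneTarget(100)) // 98: at second 0 of slot 100, slot 98's lists are dropped
	fmt.Println(pruneTarget(2))   // 0
}
```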
@@ -70,7 +70,6 @@ type Service struct {
    lcStore                        *lightClient.Store
    startWaitingDataColumnSidecars chan bool // for testing purposes only
    syncCommitteeHeadState         *cache.SyncCommitteeHeadStateCache
    inclusionListCache             *cache.InclusionLists
}

// config options for the service.

@@ -223,7 +222,6 @@ func (s *Service) Start() {
    }
    s.spawnProcessAttestationsRoutine()
    go s.runLateBlockTasks()
    go s.updateBlockWithInclusionListRoutine()
}

// Stop the blockchain service's main event loop and associated goroutines.
@@ -632,6 +632,11 @@ func (s *ChainService) CachedHeadRoot() [32]byte {
    return [32]byte{}
}

// SafeBlockHash mocks the same method in the chain service
func (s *ChainService) SafeBlockHash() [32]byte {
    return [32]byte{}
}

// GetProposerHead mocks the same method in the chain service
func (s *ChainService) GetProposerHead() [32]byte {
    if s.ForkChoiceStore != nil {

@@ -640,16 +645,6 @@ func (s *ChainService) GetProposerHead() [32]byte {
    return [32]byte{}
}

// GetAttesterHead mocks the same method in the chain service
func (s *ChainService) GetAttesterHead() [32]byte {
    if s.ForkChoiceStore != nil {
        return s.ForkChoiceStore.GetAttesterHead()
    }
    var rootArr [32]byte
    copy(rootArr[:], s.Root)
    return rootArr
}

// SetForkChoiceGenesisTime mocks the same method in the chain service
func (s *ChainService) SetForkChoiceGenesisTime(timestamp time.Time) {
    if s.ForkChoiceStore != nil {
beacon-chain/cache/BUILD.bazel (2 changes, vendored)
@@ -15,7 +15,6 @@ go_library(
        "common.go",
        "doc.go",
        "error.go",
        "inclusion_list.go",
        "interfaces.go",
        "payload_id.go",
        "proposer_indices.go",

@@ -76,7 +75,6 @@ go_test(
        "checkpoint_state_test.go",
        "committee_fuzz_test.go",
        "committee_test.go",
        "inclusion_list_test.go",
        "payload_id_test.go",
        "private_access_test.go",
        "proposer_indices_test.go",
beacon-chain/cache/inclusion_list.go (105 changes, vendored)
@@ -1,105 +0,0 @@
package cache

import (
    "crypto/sha256"
    "sync"

    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

type InclusionLists struct {
    mu  sync.RWMutex
    ils map[primitives.Slot]map[primitives.ValidatorIndex]struct {
        txs                    [][]byte
        seenTwice              bool
        isBeforeFreezeDeadline bool
    }
}

// NewInclusionLists initializes a new InclusionLists instance.
func NewInclusionLists() *InclusionLists {
    return &InclusionLists{
        ils: make(map[primitives.Slot]map[primitives.ValidatorIndex]struct {
            txs                    [][]byte
            seenTwice              bool
            isBeforeFreezeDeadline bool
        }),
    }
}

// Add adds a set of transactions for a specific slot and validator index.
func (i *InclusionLists) Add(slot primitives.Slot, validatorIndex primitives.ValidatorIndex, txs [][]byte, isBeforeFreezeDeadline bool) {
    i.mu.Lock()
    defer i.mu.Unlock()

    if _, ok := i.ils[slot]; !ok {
        i.ils[slot] = make(map[primitives.ValidatorIndex]struct {
            txs                    [][]byte
            seenTwice              bool
            isBeforeFreezeDeadline bool
        })
    }

    entry := i.ils[slot][validatorIndex]
    if entry.seenTwice {
        return // No need to modify if already marked as seen twice.
    }

    if entry.txs == nil {
        entry.txs = txs
        entry.isBeforeFreezeDeadline = isBeforeFreezeDeadline
    } else {
        entry.seenTwice = true
        entry.txs = nil // Clear transactions to save space if seen twice.
    }
    i.ils[slot][validatorIndex] = entry
}

// Get retrieves unique transactions for a specific slot.
func (i *InclusionLists) Get(slot primitives.Slot) [][]byte {
    i.mu.RLock()
    defer i.mu.RUnlock()

    ils, exists := i.ils[slot]
    if !exists {
        return [][]byte{}
    }

    var uniqueTxs [][]byte
    seen := make(map[[32]byte]struct{})
    for _, entry := range ils {
        if !entry.isBeforeFreezeDeadline {
            continue
        }
        for _, tx := range entry.txs {
            hash := sha256.Sum256(tx)
            if _, duplicate := seen[hash]; !duplicate {
                uniqueTxs = append(uniqueTxs, tx)
                seen[hash] = struct{}{}
            }
        }
    }
    return uniqueTxs
}

// Delete removes all inclusion lists for a specific slot.
func (i *InclusionLists) Delete(slot primitives.Slot) {
    i.mu.Lock()
    defer i.mu.Unlock()

    delete(i.ils, slot)
}

// SeenTwice checks if a validator's transactions were marked as seen twice for a specific slot.
func (i *InclusionLists) SeenTwice(slot primitives.Slot, idx primitives.ValidatorIndex) bool {
    i.mu.RLock()
    defer i.mu.RUnlock()

    ils, exists := i.ils[slot]
    if !exists {
        return false
    }

    entry, exists := ils[idx]
    return exists && entry.seenTwice
}
beacon-chain/cache/inclusion_list_test.go (81 changes, vendored)
@@ -1,81 +0,0 @@
package cache

import (
    "bytes"
    "testing"
)

func TestInclusionLists(t *testing.T) {
    il := NewInclusionLists()

    tests := []struct {
        name          string
        actions       func()
        expectedGet   [][]byte
        expectedTwice bool
    }{
        {
            name: "Add single validator with unique transactions",
            actions: func() {
                il.Add(1, 1, [][]byte{[]byte("tx1"), []byte("tx2")}, true)
            },
            expectedGet:   [][]byte{[]byte("tx1"), []byte("tx2")},
            expectedTwice: false,
        },
        {
            name: "Add duplicate transactions for second validator",
            actions: func() {
                il.Add(1, 2, [][]byte{[]byte("tx1"), []byte("tx3")}, true)
            },
            expectedGet:   [][]byte{[]byte("tx1"), []byte("tx2"), []byte("tx3")},
            expectedTwice: false,
        },
        {
            name: "Mark validator as seen twice",
            actions: func() {
                il.Add(1, 1, [][]byte{[]byte("tx4")}, true)
            },
            expectedGet:   [][]byte{[]byte("tx1"), []byte("tx3")},
            expectedTwice: true,
        },
        {
            name: "Delete a slot",
            actions: func() {
                il.Delete(1)
            },
            expectedGet:   nil,
            expectedTwice: false,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            tt.actions()

            // Check Get results
            got := il.Get(1)
            if !compareTransactions(got, tt.expectedGet) {
                t.Errorf("unexpected Get result: got %v, want %v", got, tt.expectedGet)
            }

            // Check SeenTwice result for validator 1
            gotTwice := il.SeenTwice(1, 1)
            if gotTwice != tt.expectedTwice {
                t.Errorf("unexpected SeenTwice result: got %v, want %v", gotTwice, tt.expectedTwice)
            }
        })
    }
}

// compareTransactions compares two slices of byte slices for equality.
func compareTransactions(a, b [][]byte) bool {
    if len(a) != len(b) {
        return false
    }
    for i := range a {
        if !bytes.Equal(a[i], b[i]) {
            return false
        }
    }
    return true
}
@@ -46,9 +46,6 @@ const (

    // DataColumnReceived is sent after a data column has been seen after gossip validation rules.
    DataColumnReceived = 12

    // InclusionListReceived is sent after an inclusion list is received from gossip or rpc
    InclusionListReceived = 13
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.

@@ -85,11 +82,6 @@ type BlobSidecarReceivedData struct {
    Blob *blocks.VerifiedROBlob
}

// InclusionListReceivedData is the data sent with InclusionListReceived events.
type InclusionListReceivedData struct {
    SignedInclusionList *ethpb.SignedInclusionList
}

// ProposerSlashingReceivedData is the data sent with ProposerSlashingReceived events.
type ProposerSlashingReceivedData struct {
    ProposerSlashing *ethpb.ProposerSlashing
@@ -7,7 +7,6 @@ go_library(
        "beacon_committee.go",
        "block.go",
        "genesis.go",
        "inclusion_list.go",
        "legacy.go",
        "metrics.go",
        "randao.go",

@@ -22,7 +21,6 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/state:go_default_library",
@@ -272,10 +272,9 @@ func BeaconCommittee(

// CommitteeAssignment represents committee list, committee index, and to be attested slot for a given epoch.
type CommitteeAssignment struct {
    Committee                  []primitives.ValidatorIndex
    AttesterSlot               primitives.Slot
    CommitteeIndex             primitives.CommitteeIndex
    InclusionListCommitteeSlot primitives.Slot
    Committee      []primitives.ValidatorIndex
    AttesterSlot   primitives.Slot
    CommitteeIndex primitives.CommitteeIndex
}

// VerifyAssignmentEpoch verifies if the given epoch is valid for assignment based on the provided state.

@@ -446,22 +445,6 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
                assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
            }
        }
        if slots.ToEpoch(slot) >= params.BeaconConfig().Eip7805ForkEpoch {
            // Retrieve inclusion list committee assignments for the slot and update the assignments map.
            indices, err := GetInclusionListCommittee(ctx, state, slot)
            if err != nil {
                return nil, errors.Wrap(err, "could not get inclusion list committee")
            }
            for _, vIndex := range indices {
                if _, exists := vals[vIndex]; !exists {
                    continue
                }
                if _, exists := assignments[vIndex]; !exists {
                    assignments[vIndex] = &CommitteeAssignment{}
                }
                assignments[vIndex].InclusionListCommitteeSlot = slot
            }
        }
    }
    return assignments, nil
}
@@ -1,107 +0,0 @@
package helpers

import (
    "context"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/crypto/bls"
    eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/pkg/errors"
)

var (
    errNilIl            = errors.New("nil inclusion list")
    errNilCommitteeRoot = errors.New("nil inclusion list committee root")
    errNilSignature     = errors.New("nil signature")
    errIncorrectState   = errors.New("incorrect state version")
)

// ValidateNilSignedInclusionList validates that a SignedInclusionList is not nil and contains a signature.
func ValidateNilSignedInclusionList(il *eth.SignedInclusionList) error {
    if il == nil {
        return errNilIl
    }
    if il.Signature == nil {
        return errNilSignature
    }
    return ValidateNilInclusionList(il.Message)
}

// ValidateNilInclusionList validates that an InclusionList is not nil and contains a committee root.
func ValidateNilInclusionList(il *eth.InclusionList) error {
    if il == nil {
        return errNilIl
    }
    if il.InclusionListCommitteeRoot == nil {
        return errNilCommitteeRoot
    }
    return nil
}

// GetInclusionListCommittee retrieves the validator indices assigned to the inclusion list committee
// for a given slot. Returns an error if the state or slot does not meet the required constraints.
func GetInclusionListCommittee(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
    if slots.ToEpoch(slot) < params.BeaconConfig().Eip7805ForkEpoch {
        return nil, errIncorrectState
    }
    epoch := slots.ToEpoch(slot)
    seed, err := Seed(state, epoch, params.BeaconConfig().DomainInclusionListCommittee)
    if err != nil {
        return nil, errors.Wrap(err, "could not get seed")
    }
    indices, err := ActiveValidatorIndices(ctx, state, epoch)
    if err != nil {
        return nil, err
    }
    start := uint64(slot%params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().InclusionListCommitteeSize
    end := start + params.BeaconConfig().InclusionListCommitteeSize

    shuffledIndices := make([]primitives.ValidatorIndex, len(indices))
    copy(shuffledIndices, indices)
    shuffledList, err := UnshuffleList(shuffledIndices, seed)
    if err != nil {
        return nil, err
    }
    return shuffledList[start:end], nil
}

// ValidateInclusionListSignature verifies the signature on a SignedInclusionList against the public key
// of the validator specified in the inclusion list.
func ValidateInclusionListSignature(ctx context.Context, st state.ReadOnlyBeaconState, il *eth.SignedInclusionList) error {
    if err := ValidateNilSignedInclusionList(il); err != nil {
        return err
    }

    val, err := st.ValidatorAtIndex(il.Message.ValidatorIndex)
    if err != nil {
        return err
    }
    pub, err := bls.PublicKeyFromBytes(val.PublicKey)
    if err != nil {
        return err
    }
    sig, err := bls.SignatureFromBytes(il.Signature)
    if err != nil {
        return err
    }

    currentEpoch := slots.ToEpoch(st.Slot())
    domain, err := signing.Domain(st.Fork(), currentEpoch, params.BeaconConfig().DomainInclusionListCommittee, st.GenesisValidatorsRoot())
    if err != nil {
        return err
    }

    root, err := signing.ComputeSigningRoot(il.Message, domain)
    if err != nil {
        return err
    }

    if !sig.Verify(pub, root[:]) {
        return signing.ErrSigFailedToVerify
    }
    return nil
}
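GetInclusionListCommittee above selects a contiguous window of the shuffled active-validator list, one window per slot of the epoch. A worked example of the index arithmetic (the numbers are illustrative: 32 slots per epoch as on mainnet, and 16 as the EIP-7805 committee size; neither is read from the real config here):

```go
package main

import "fmt"

// ilCommitteeRange reproduces the start/end computation in GetInclusionListCommittee.
func ilCommitteeRange(slot, slotsPerEpoch, committeeSize uint64) (start, end uint64) {
	start = (slot % slotsPerEpoch) * committeeSize
	return start, start + committeeSize
}

func main() {
	start, end := ilCommitteeRange(100, 32, 16)
	fmt.Println(start, end) // 64 80: slot 100 is slot 4 of its epoch, so it takes shuffled indices [64, 80)
}
```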
@@ -7,7 +7,6 @@ go_library(
        "block_reader.go",
        "deposit.go",
        "engine_client.go",
        "engine_client_focil.go",
        "errors.go",
        "log.go",
        "log_processing.go",

@@ -63,7 +62,6 @@ go_library(
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//:go_default_library",
        "@com_github_ethereum_go_ethereum//accounts/abi/bind:go_default_library",
        "@com_github_ethereum_go_ethereum//beacon/engine:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
@@ -129,27 +129,19 @@ type Reconstructor interface {
// EngineCaller defines a client that can interact with an Ethereum
// execution node's engine service via JSON-RPC.
type EngineCaller interface {
    NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests, ilTxs [][]byte) ([]byte, error)
    NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests) ([]byte, error)
    ForkchoiceUpdated(
        ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
    ) (*pb.PayloadIDBytes, []byte, error)
    GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (*blocks.GetPayloadResponse, error)
    ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
    GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
    GetInclusionList(ctx context.Context, parentHash [32]byte) ([][]byte, error)
    UpdatePayloadWithInclusionList(ctx context.Context, payloadID primitives.PayloadID, txs [][]byte) (*primitives.PayloadID, error)
}

var ErrEmptyBlockHash = errors.New("Block hash is empty 0x0000...")

// NewPayload request calls the engine_newPayloadVX method via JSON-RPC.
func (s *Service) NewPayload(
    ctx context.Context,
    payload interfaces.ExecutionData,
    versionedHashes []common.Hash,
    parentBlockRoot *common.Hash,
    executionRequests *pb.ExecutionRequests,
    ilTxs [][]byte) ([]byte, error) {
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests) ([]byte, error) {
    ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
    defer span.End()
    start := time.Now()

@@ -179,25 +171,12 @@ func (s *Service) NewPayload(
        if err != nil {
            return nil, handleRPCError(err)
        }
    } else if ilTxs == nil {
        flattenedRequests, err := pb.EncodeExecutionRequests(executionRequests)
        if err != nil {
            return nil, errors.Wrap(err, "failed to encode execution requests")
        }
        err = s.rpcClient.CallContext(ctx, result, NewPayloadMethodV4, payloadPb, versionedHashes, parentBlockRoot, flattenedRequests)
        if err != nil {
            return nil, handleRPCError(err)
        }
    } else {
        flattenedRequests, err := pb.EncodeExecutionRequests(executionRequests)
        if err != nil {
            return nil, errors.Wrap(err, "failed to encode execution requests")
        }
        hexIlTxs := make([]hexutil.Bytes, len(ilTxs))
        for i, tx := range ilTxs {
            hexIlTxs[i] = tx
        }
        err = s.rpcClient.CallContext(ctx, result, NewPayloadMethodV5, payloadPb, versionedHashes, parentBlockRoot, flattenedRequests, hexIlTxs)
        err = s.rpcClient.CallContext(ctx, result, NewPayloadMethodV4, payloadPb, versionedHashes, parentBlockRoot, flattenedRequests)
        if err != nil {
            return nil, handleRPCError(err)
        }

@@ -217,8 +196,6 @@ func (s *Service) NewPayload(
        return result.LatestValidHash, ErrInvalidPayloadStatus
    case pb.PayloadStatus_VALID:
        return result.LatestValidHash, nil
    case pb.PayloadStatus_INCLUSION_LIST_NOT_SATISFIED:
        return result.LatestValidHash, ErrBadInclusionListPayloadStatus
    default:
        return nil, ErrUnknownPayloadStatus
    }
@@ -1,79 +0,0 @@
package execution

import (
    "context"
    "time"

    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
    "github.com/ethereum/go-ethereum/beacon/engine"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
)

const (
    NewPayloadMethodV5               = "engine_newPayloadV5" // Do we really need this?
    GetInclusionListV1               = "engine_getInclusionListV1"
    UpdatePayloadWithInclusionListV1 = "engine_updatePayloadWithInclusionListV1"
)

// GetInclusionList fetches the inclusion list for a given parent hash by invoking the execution engine RPC.
// It uses a context with a timeout defined by the Beacon configuration.
// Implements: https://github.com/ethereum/execution-apis/pull/609
func (s *Service) GetInclusionList(ctx context.Context, parentHash [32]byte) ([][]byte, error) {
    ctx, span := trace.StartSpan(ctx, "execution.GetInclusionList")
    defer span.End()

    start := time.Now()
    defer func() {
        getInclusionListLatency.Observe(float64(time.Since(start).Milliseconds()))
    }()

    timeout := time.Duration(params.BeaconConfig().ExecutionEngineTimeoutValue) * time.Second
    ctx, cancel := context.WithDeadline(ctx, time.Now().Add(timeout))
    defer cancel()

    var result []hexutil.Bytes
    err := s.rpcClient.CallContext(ctx, &result, GetInclusionListV1, common.Hash(parentHash))
    if err != nil {
        return nil, handleRPCError(err)
    }

    bytesResult := make([][]byte, len(result))
    for i, b := range result {
        bytesResult[i] = b
    }

    return bytesResult, nil
}

// UpdatePayloadWithInclusionList updates a payload with a provided inclusion list of transactions.
// It uses a context with a timeout defined by the Beacon configuration and returns the new payload ID.
// Implements: https://github.com/ethereum/execution-apis/pull/609
func (s *Service) UpdatePayloadWithInclusionList(ctx context.Context, payloadID primitives.PayloadID, txs [][]byte) (*primitives.PayloadID, error) {
    ctx, span := trace.StartSpan(ctx, "execution.UpdatePayloadWithInclusionList")
    defer span.End()

    start := time.Now()
    defer func() {
        updatePayloadWithInclusionListLatency.Observe(float64(time.Since(start).Milliseconds()))
    }()

    timeout := time.Duration(params.BeaconConfig().ExecutionEngineTimeoutValue) * time.Second
    ctx, cancel := context.WithDeadline(ctx, time.Now().Add(timeout))
    defer cancel()

    hexTxs := make([]hexutil.Bytes, len(txs))
    for i, tx := range txs {
        hexTxs[i] = tx
    }

    result := &engine.PayloadID{}
    err := s.rpcClient.CallContext(ctx, result, UpdatePayloadWithInclusionListV1, engine.PayloadID(payloadID), hexTxs)
    if err != nil {
        return nil, handleRPCError(err)
    }

    return (*primitives.PayloadID)(result), nil
}
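For context on how the two deleted RPC wrappers fit together on the proposer side, here is a hypothetical call sequence. It is an assumption based on the method signatures and the blockchain-package callers earlier in the diff, not code from the repository, and it presumes this package's existing imports:

```go
// Hypothetical helper: fetch the inclusion list for the head payload's parent and ask
// the execution client to rebuild the pending payload with those transactions.
func refreshPayloadWithInclusionList(ctx context.Context, engine EngineCaller,
	parentHash [32]byte, id primitives.PayloadID) (*primitives.PayloadID, error) {
	txs, err := engine.GetInclusionList(ctx, parentHash)
	if err != nil {
		return nil, err
	}
	if len(txs) == 0 {
		return &id, nil // nothing to enforce; keep the current payload ID
	}
	return engine.UpdatePayloadWithInclusionList(ctx, id, txs)
}
```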
@@ -127,7 +127,7 @@ func TestClient_IPC(t *testing.T) {
        require.Equal(t, true, ok)
        wrappedPayload, err := blocks.WrappedExecutionPayload(req)
        require.NoError(t, err)
        latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, nil)
        latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
        require.NoError(t, err)
        require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
    })

@@ -138,7 +138,7 @@ func TestClient_IPC(t *testing.T) {
        require.Equal(t, true, ok)
        wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req)
        require.NoError(t, err)
        latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, nil)
        latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
        require.NoError(t, err)
        require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
    })

@@ -603,7 +603,7 @@ func TestClient_HTTP(t *testing.T) {
        // We call the RPC method via HTTP and expect a proper result.
        wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
        require.NoError(t, err)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
        require.NoError(t, err)
        require.DeepEqual(t, want.LatestValidHash, resp)
    })

@@ -617,7 +617,7 @@ func TestClient_HTTP(t *testing.T) {
        // We call the RPC method via HTTP and expect a proper result.
        wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
        require.NoError(t, err)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
        require.NoError(t, err)
        require.DeepEqual(t, want.LatestValidHash, resp)
    })

@@ -631,7 +631,7 @@ func TestClient_HTTP(t *testing.T) {
        // We call the RPC method via HTTP and expect a proper result.
        wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
        require.NoError(t, err)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil)
        require.NoError(t, err)
        require.DeepEqual(t, want.LatestValidHash, resp)
    })

@@ -670,7 +670,7 @@ func TestClient_HTTP(t *testing.T) {
            },
        }
        client := newPayloadV4Setup(t, want, execPayload, requests)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests)
        require.NoError(t, err)
        require.DeepEqual(t, want.LatestValidHash, resp)
    })

@@ -684,7 +684,7 @@ func TestClient_HTTP(t *testing.T) {
        // We call the RPC method via HTTP and expect a proper result.
        wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
        require.NoError(t, err)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
        require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
        require.DeepEqual(t, []uint8(nil), resp)
    })

@@ -698,7 +698,7 @@ func TestClient_HTTP(t *testing.T) {
        // We call the RPC method via HTTP and expect a proper result.
        wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
        require.NoError(t, err)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
        require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
        require.DeepEqual(t, []uint8(nil), resp)
    })

@@ -712,7 +712,7 @@ func TestClient_HTTP(t *testing.T) {
        // We call the RPC method via HTTP and expect a proper result.
        wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
        require.NoError(t, err)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil)
        require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
        require.DeepEqual(t, []uint8(nil), resp)
    })

@@ -751,7 +751,7 @@ func TestClient_HTTP(t *testing.T) {
            },
        }
        client := newPayloadV4Setup(t, want, execPayload, requests)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests)
        require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
        require.DeepEqual(t, []uint8(nil), resp)
    })

@@ -765,7 +765,7 @@ func TestClient_HTTP(t *testing.T) {
        // We call the RPC method via HTTP and expect a proper result.
        wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
        require.NoError(t, err)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
        require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
        require.DeepEqual(t, []uint8(nil), resp)
    })

@@ -779,7 +779,7 @@ func TestClient_HTTP(t *testing.T) {
        // We call the RPC method via HTTP and expect a proper result.
        wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
        require.NoError(t, err)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
        require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
        require.DeepEqual(t, []uint8(nil), resp)
    })

@@ -793,7 +793,7 @@ func TestClient_HTTP(t *testing.T) {
        // We call the RPC method via HTTP and expect a proper result.
        wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
        require.NoError(t, err)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil)
        require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
        require.DeepEqual(t, []uint8(nil), resp)
    })

@@ -831,7 +831,7 @@ func TestClient_HTTP(t *testing.T) {
            },
        }
        client := newPayloadV4Setup(t, want, execPayload, requests)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests)
        require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
        require.DeepEqual(t, []uint8(nil), resp)
    })

@@ -845,7 +845,7 @@ func TestClient_HTTP(t *testing.T) {
        // We call the RPC method via HTTP and expect a proper result.
        wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
        require.NoError(t, err)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
        require.ErrorIs(t, ErrInvalidPayloadStatus, err)
        require.DeepEqual(t, want.LatestValidHash, resp)
    })

@@ -859,7 +859,7 @@ func TestClient_HTTP(t *testing.T) {
        // We call the RPC method via HTTP and expect a proper result.
        wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
        require.NoError(t, err)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
        require.ErrorIs(t, ErrInvalidPayloadStatus, err)
        require.DeepEqual(t, want.LatestValidHash, resp)
    })

@@ -873,7 +873,7 @@ func TestClient_HTTP(t *testing.T) {
        // We call the RPC method via HTTP and expect a proper result.
        wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
        require.NoError(t, err)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil)
        require.ErrorIs(t, ErrInvalidPayloadStatus, err)
        require.DeepEqual(t, want.LatestValidHash, resp)
    })

@@ -912,7 +912,7 @@ func TestClient_HTTP(t *testing.T) {
            },
        }
        client := newPayloadV4Setup(t, want, execPayload, requests)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests)
        require.ErrorIs(t, ErrInvalidPayloadStatus, err)
        require.DeepEqual(t, want.LatestValidHash, resp)
    })

@@ -926,7 +926,7 @@ func TestClient_HTTP(t *testing.T) {
        // We call the RPC method via HTTP and expect a proper result.
        wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
        require.NoError(t, err)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil, nil)
        resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
        require.ErrorIs(t, ErrUnknownPayloadStatus, err)
        require.DeepEqual(t, []uint8(nil), resp)
    })
@@ -34,6 +34,5 @@ var (
    // ErrRequestTooLarge when the request is too large
    ErrRequestTooLarge = errors.New("request too large")
    // ErrUnsupportedVersion represents a case where a payload is requested for a block type that doesn't have a known mapping.
    ErrUnsupportedVersion            = errors.New("unknown ExecutionPayload schema for block version")
    ErrBadInclusionListPayloadStatus = errors.New("payload did not satisfy inclusion list")
    ErrUnsupportedVersion = errors.New("unknown ExecutionPayload schema for block version")
)
@@ -71,18 +71,4 @@ var (
            Name: "execution_payload_bodies_count",
            Help: "The number of requested payload bodies is too large",
        })
    getInclusionListLatency = promauto.NewHistogram(
        prometheus.HistogramOpts{
            Name:    "get_inclusion_list_v1_latency_milliseconds",
            Help:    "Captures RPC latency for getInclusionListV1 in milliseconds",
            Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
        },
    )
    updatePayloadWithInclusionListLatency = promauto.NewHistogram(
        prometheus.HistogramOpts{
            Name:    "update_payload_inclusion_list_v1_latency_milliseconds",
            Help:    "Captures RPC latency for updatePayloadWithInclusionListV1 in milliseconds",
            Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
        },
    )
)
@@ -43,7 +43,8 @@ type EngineClient struct {
    ErrorDataColumnSidecars error
}

func (e *EngineClient) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests, ilTxs [][]byte) ([]byte, error) {
// NewPayload --
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData, _ []common.Hash, _ *common.Hash, _ *pb.ExecutionRequests) ([]byte, error) {
    return e.NewPayloadResp, e.ErrNewPayload
}

@@ -170,11 +171,3 @@ func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime
        blk = parentBlk
    }
}

func (e *EngineClient) GetInclusionList(ctx context.Context, parentHash [32]byte) ([][]byte, error) {
    return nil, nil
}

func (e *EngineClient) UpdatePayloadWithInclusionList(ctx context.Context, payloadID primitives.PayloadID, txs [][]byte) (*primitives.PayloadID, error) {
    return nil, nil
}
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "attester_head.go",
        "doc.go",
        "errors.go",
        "forkchoice.go",
@@ -1,18 +0,0 @@
package doublylinkedtree

// GetAttesterHead returns the attester head root given inclusion list satisfaction.
func (f *ForkChoice) GetAttesterHead() [32]byte {
    head := f.store.headNode
    if head == nil {
        return [32]byte{}
    }

    parent := head.parent
    if parent == nil {
        return head.root
    }
    if head.notSatisfyingInclusionList {
        return parent.root
    }
    return head.root
}
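The deleted `GetAttesterHead` encodes the FOCIL attester rule: if the current head block was marked as not satisfying its inclusion list, attesters treat its parent as the head. A toy restatement over a stand-in type (the real `Node` in this package is unexported, so this is illustration only, not repository code):

```go
// toyNode is a stand-in for the package's unexported forkchoice node.
type toyNode struct {
	root                       [32]byte
	parent                     *toyNode
	notSatisfyingInclusionList bool
}

// attesterHead restates the rule from the removed GetAttesterHead.
func attesterHead(head *toyNode) [32]byte {
	if head == nil {
		return [32]byte{}
	}
	if head.parent != nil && head.notSatisfyingInclusionList {
		return head.parent.root
	}
	return head.root
}
```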
@@ -29,6 +29,7 @@ func New() *ForkChoice {
        unrealizedFinalizedCheckpoint: &forkchoicetypes.Checkpoint{},
        prevJustifiedCheckpoint:       &forkchoicetypes.Checkpoint{},
        finalizedCheckpoint:           &forkchoicetypes.Checkpoint{},
        safeHeadRoot:                  [32]byte{},
        proposerBoostRoot:             [32]byte{},
        nodeByRoot:                    make(map[[fieldparams.RootLength]byte]*Node),
        nodeByPayload:                 make(map[[fieldparams.RootLength]byte]*Node),
@@ -70,11 +71,115 @@ func (f *ForkChoice) Head(

    jc := f.JustifiedCheckpoint()
    fc := f.FinalizedCheckpoint()
    currentEpoch := slots.EpochsSinceGenesis(f.store.genesisTime)
    if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
        return [32]byte{}, errors.Wrap(err, "could not update best descendant")

    currentSlot := slots.CurrentSlot(f.store.genesisTime)
    secondsSinceSlotStart, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
    if err != nil {
        log.WithError(err).Error("Could not compute seconds since slot start")
    }
    if err := f.store.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
        justifiedEpoch:        jc.Epoch,
        finalizedEpoch:        fc.Epoch,
        currentSlot:           currentSlot,
        secondsSinceSlotStart: secondsSinceSlotStart,
        committeeWeight:       f.store.committeeWeight,
        pbRoot:                f.store.proposerBoostRoot,
        pbValue:               f.store.previousProposerBoostScore,
    }); err != nil {
        return [32]byte{}, errors.Wrap(err, "Could not update best descendant")
    }
    h, err := f.store.head(ctx)
    if err != nil {
        return [32]byte{}, errors.Wrap(err, "Could not get head")
    }

    // Return early if the head is not the highest received node before updating the safe head.
    if f.store.highestReceivedNode.slot != slots.CurrentSlot(f.store.genesisTime) {
        return h, nil
    }

    if err := f.updateSafeHead(ctx); err != nil {
        log.WithError(err).Error("Could not update safe head")
    }

    return h, nil
}

// updateSafeHead updates the safe head in the fork choice store.
func (f *ForkChoice) updateSafeHead(
    ctx context.Context,
) error {
    oldSafeHeadRoot := f.store.safeHeadRoot
    newSafeHeadRoot, err := f.store.safeHead(ctx)
    if err != nil {
        return errors.Wrap(err, "Could not get safe head")
    }

    // If the safe head has not changed, return early.
    if oldSafeHeadRoot == newSafeHeadRoot {
        return nil
    }

    // Update safe head
    f.store.safeHeadRoot = newSafeHeadRoot

    f.logSafeHead(ctx, newSafeHeadRoot, oldSafeHeadRoot)

    return nil
}

func (f *ForkChoice) logSafeHead(ctx context.Context, newSafeHeadRoot [32]byte, oldSafeHeadRoot [32]byte) {
    newSafeHeadNode, ok := f.store.nodeByRoot[newSafeHeadRoot]
    if !ok || newSafeHeadNode == nil {
        log.WithError(ErrNilNode).Error("Could not find new safe head node")
        return
    }
    newSafeHeadSlot := newSafeHeadNode.slot
    currentSlot := slots.CurrentSlot(f.store.genesisTime)
    secondsSinceSlotStart, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
    if err != nil {
        log.WithError(err).Error("Could not compute seconds since slot start")
    }
    log.WithFields(logrus.Fields{
        "currentSlot":        fmt.Sprintf("%d", currentSlot),
        "sinceSlotStartTime": fmt.Sprintf("%d", secondsSinceSlotStart.Milliseconds()),
        "newSafeHeadSlot":    fmt.Sprintf("%d", newSafeHeadSlot),
        "newSafeHeadRoot":    fmt.Sprintf("%#x", newSafeHeadRoot),
        "weight":             fmt.Sprintf("%d", newSafeHeadNode.weight),
    }).Info("Safe head has changed")

    // Update metrics.
    safeHeadSlotNumber.Set(float64(newSafeHeadSlot))

    // Check if the safe head reorged.
    commonRoot, forkSlot, err := f.CommonAncestor(ctx, oldSafeHeadRoot, newSafeHeadRoot)
    if err != nil {
        log.WithError(err).Error("Could not find common ancestor root")
        return
    }

    // The safe head has reorged. This is bad!
    if oldSafeHeadRoot != [32]byte{} && commonRoot != oldSafeHeadRoot {
        oldSafeHeadNode, ok := f.store.nodeByRoot[oldSafeHeadRoot]
        if !ok || oldSafeHeadNode == nil {
            log.WithError(ErrNilNode).Error("Could not find old safe head node")
            return
        }
        oldSafeHeadSlot := oldSafeHeadNode.slot
        dis := oldSafeHeadSlot + newSafeHeadSlot - 2*forkSlot
        dep := max(uint64(oldSafeHeadSlot-forkSlot), uint64(newSafeHeadSlot-forkSlot))
        log.WithFields(logrus.Fields{
            "oldSafeHeadSlot":    fmt.Sprintf("%d", oldSafeHeadSlot),
            "oldSafeHeadRoot":    fmt.Sprintf("%#x", oldSafeHeadRoot),
            "commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
            "distance":           dis,
            "depth":              dep,
        }).Error("Safe head reorg occurred")

        safeHeadReorgDistance.Observe(float64(dis))
        safeHeadReorgDepth.Observe(float64(dep))
        safeHeadReorgCount.Inc()
    }
    return f.store.head(ctx)
}

// ProcessAttestation processes attestation for vote accounting, it iterates around validator indices
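The reorg metrics in `logSafeHead` are easiest to sanity-check with concrete slots: distance sums how far each safe head sits past the common ancestor, and depth keeps the longer of the two branches. A standalone worked example (the slot values are invented for illustration; the `max` builtin needs Go 1.21+, which the diff itself already relies on):

```go
package main

import "fmt"

// reorgMetrics reproduces the distance/depth arithmetic from logSafeHead.
func reorgMetrics(oldSlot, newSlot, forkSlot uint64) (distance, depth uint64) {
	distance = oldSlot + newSlot - 2*forkSlot
	depth = max(oldSlot-forkSlot, newSlot-forkSlot)
	return distance, depth
}

func main() {
	d, p := reorgMetrics(20, 22, 18)
	fmt.Println(d, p) // 6 4: two slots on the old branch plus four on the new; the deeper branch is 4
}
```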
@@ -537,6 +642,23 @@ func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
    return node.payloadHash
}

// SafeBlockHash returns the hash of the payload at the safe head
func (f *ForkChoice) SafeBlockHash() [32]byte {
    switch params.BeaconConfig().SafeBlockAlgorithm {
    case "justified":
        return f.JustifiedPayloadBlockHash()
    case "fast-confirmation":
        safeHeadRoot := f.store.safeHeadRoot
        node, ok := f.store.nodeByRoot[safeHeadRoot]
        if !ok || node == nil {
            return [32]byte{}
        }
        return node.payloadHash
    default:
        return f.UnrealizedJustifiedPayloadBlockHash()
    }
}

// ForkChoiceDump returns a full dump of forkchoice.
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
    jc := &ethpb.Checkpoint{

@@ -570,6 +692,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, err
    resp := &forkchoice2.Dump{
        JustifiedCheckpoint:           jc,
        UnrealizedJustifiedCheckpoint: ujc,
        SafeHeadRoot:                  f.store.safeHeadRoot[:],
        FinalizedCheckpoint:           fc,
        UnrealizedFinalizedCheckpoint: ufc,
        ProposerBoostRoot:             f.store.proposerBoostRoot[:],
@@ -19,6 +19,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
    "github.com/OffchainLabs/prysm/v6/time/slots"
)

// prepareForkchoiceState prepares a beacon State with the given data to mock

@@ -229,7 +230,13 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
    require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
    require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)

    require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
    require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
        justifiedEpoch:        1,
        finalizedEpoch:        1,
        currentSlot:           6,
        secondsSinceSlotStart: 0,
        committeeWeight:       f.store.committeeWeight,
    }))
    require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)

    r1 := [32]byte{'1'}
@@ -923,3 +930,85 @@ func TestForkChoice_CleanupInserting(t *testing.T) {
    require.NotNil(t, f.InsertNode(ctx, st, roblock))
    require.Equal(t, false, f.HasNode(roblock.Root()))
}

func TestForkChoiceSafeHead(t *testing.T) {
    f := setup(0, 0)
    ctx := context.Background()
    balances := []uint64{32, 32, 32, 32, 32, 32, 32, 32, 32, 32}
    f.balancesByRoot = func(context.Context, [32]byte) ([]uint64, error) {
        return balances, nil
    }
    require.NoError(t, f.updateJustifiedBalances(context.Background(), [32]byte{}))
    require.Equal(t, uint64(len(balances)), f.numActiveValidators)
    require.Equal(t, uint64(10), f.store.committeeWeight)
    require.DeepEqual(t, balances, f.justifiedBalances)
    proposerScoreBoost := params.BeaconConfig().ProposerScoreBoost
    require.Equal(t, uint64(40), proposerScoreBoost)
    slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
    require.Equal(t, primitives.Slot(32), slotsPerEpoch)

    driftGenesisTime(f, primitives.Slot(11), 0)
    st, b, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, st, b))
    for i := 2; i < 10; i++ {
        st, b, err = prepareForkchoiceState(ctx, primitives.Slot(i), indexToHash(uint64(i)), indexToHash(uint64(i-1)), params.BeaconConfig().ZeroHash, 0, 0)
        require.NoError(t, err)
        require.NoError(t, f.InsertNode(ctx, st, b))
    }
    // Add a node at slot 11 to ensure highest received node is at current slot
    st, b, err = prepareForkchoiceState(ctx, primitives.Slot(11), indexToHash(uint64(11)), indexToHash(uint64(9)), params.BeaconConfig().ZeroHash, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, st, b))

    tests := []struct {
        name         string
        currentSlot  primitives.Slot
        nodeBalances []uint64
        wantRoot     [32]byte
    }{
        {
            name:         "safeHead is head-1",
            currentSlot:  primitives.Slot(11),
            nodeBalances: []uint64{10, 10, 10, 10, 10, 10, 10, 10, 10, 14, 10},
            wantRoot:     indexToHash(9),
        },
        {
            name:         "safeHead is head-2",
            currentSlot:  primitives.Slot(11),
            nodeBalances: []uint64{10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
            wantRoot:     indexToHash(9),
        },
        {
            name:         "safeHead is head-3",
            currentSlot:  primitives.Slot(11),
            nodeBalances: []uint64{10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 10},
            wantRoot:     indexToHash(7),
        },
        {
            name:         "safeHead is justified",
            currentSlot:  primitives.Slot(11),
            nodeBalances: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
            wantRoot:     params.BeaconConfig().ZeroHash,
        },
    }

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            driftGenesisTime(f, tc.currentSlot, 0)
            require.Equal(t, tc.currentSlot, slots.CurrentSlot(f.store.genesisTime))

            s := f.store
            s.nodeByRoot[params.BeaconConfig().ZeroHash].balance = tc.nodeBalances[0]
            for i := 1; i < 10; i++ {
                s.nodeByRoot[indexToHash(uint64(i))].balance = tc.nodeBalances[i]
            }
            s.nodeByRoot[indexToHash(uint64(11))].balance = tc.nodeBalances[10]

            _, err = f.Head(ctx)
            require.NoError(t, err)
            safeHead := f.store.safeHeadRoot
            require.Equal(t, tc.wantRoot, safeHead)
        })
    }
}
@@ -51,4 +51,28 @@
			Help: "The number of times pruning happened.",
		},
	)
	safeHeadSlotNumber = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "safe_head_slot",
			Help: "The slot number of the current safe head.",
		},
	)
	safeHeadReorgCount = promauto.NewCounter(prometheus.CounterOpts{
		Name: "safe_head_reorgs_total",
		Help: "Counts the number of safe head reorgs.",
	})
	safeHeadReorgDistance = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "safe_head_reorg_distance",
			Help:    "Captures the distance of safe head reorgs. Distance is defined as the number of blocks between the old safe head and the new safe head.",
			Buckets: []float64{1, 2, 4, 8, 16, 32, 64},
		},
	)
	safeHeadReorgDepth = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "safe_head_reorg_depth",
			Help:    "Captures the depth of safe head reorgs. Depth is defined as the number of blocks between the safe heads and their common ancestor.",
			Buckets: []float64{1, 2, 4, 8, 16, 32},
		},
	)
)
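// Illustrative sketch (the actual call sites are not shown in this hunk; helper name and
// arguments are hypothetical): how the collectors above would typically be fed when the
// safe head is recomputed.
func recordSafeHeadUpdate(newSlot primitives.Slot, reorged bool, distance, depth uint64) {
	safeHeadSlotNumber.Set(float64(newSlot))
	if reorged {
		safeHeadReorgCount.Inc()
		safeHeadReorgDistance.Observe(float64(distance))
		safeHeadReorgDepth.Observe(float64(depth))
	}
}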
|
||||
@@ -10,14 +10,27 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ProcessAttestationsThreshold is the amount of time after which we
|
||||
// process attestations for the current slot
|
||||
const ProcessAttestationsThreshold = 10 * time.Second
|
||||
|
||||
// updateDescendantArgs bundles the inputs needed to recompute the best descendant and
// best confirmed descendant links of a node and its children.
type updateDescendantArgs struct {
	justifiedEpoch        primitives.Epoch
	finalizedEpoch        primitives.Epoch
	currentSlot           primitives.Slot
	secondsSinceSlotStart time.Duration
	committeeWeight       uint64
	pbRoot                [32]byte
	pbValue               uint64
}

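// Illustrative sketch (helper name hypothetical; mirrors the store.insert change later in
// this comparison): kicking off a best-descendant update from the tree root with the args
// bundled above.
func refreshBestDescendants(ctx context.Context, s *Store, currentSlot primitives.Slot, sinceSlotStart time.Duration) error {
	return s.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
		justifiedEpoch:        s.justifiedCheckpoint.Epoch,
		finalizedEpoch:        s.finalizedCheckpoint.Epoch,
		currentSlot:           currentSlot,
		secondsSinceSlotStart: sinceSlotStart,
		committeeWeight:       s.committeeWeight,
	})
}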
// applyWeightChanges recursively traverses a tree of nodes to update each node's total weight and
|
||||
// weight without proposer boost by summing the balance of the node and its children.
|
||||
// If the node matches a specific root (`pbRoot`), it subtracts a given boost value (`pbValue`) from the weight without boost,
|
||||
// ensuring the balance is sufficient. It also handles context cancellation and errors during recursion.
|
||||
func (n *Node) applyWeightChanges(ctx context.Context) error {
|
||||
// Recursively calling the children to sum their weights.
|
||||
childrenWeight := uint64(0)
|
||||
@@ -37,14 +50,51 @@ func (n *Node) applyWeightChanges(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// maxWeight computes the maximum weight a node can contribute from its start slot to the
// given end slot, scaled by committee weight. If the range is within one epoch, it returns
// the number of slots times the committee weight. If the range spans at least one full epoch,
// or starts at an epoch boundary and ends in the next epoch, it returns the full epoch weight.
// Otherwise, it prorates the weight based on the number of slots in the start and end epochs,
// accounting for partial epoch coverage.
func (n *Node) maxWeight(endSlot primitives.Slot, committeeWeight uint64) uint64 {
	startSlot := n.slot
	if n.parent != nil {
		startSlot = n.parent.slot + 1
	}
	if startSlot > endSlot {
		return 0
	}

	startEpoch := slots.ToEpoch(startSlot)
	endEpoch := slots.ToEpoch(endSlot)
	slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
	slotSpan := uint64(endSlot - startSlot + 1)

	if startEpoch == endEpoch {
		return committeeWeight * slotSpan
	}

	if endEpoch > startEpoch+1 || (endEpoch == startEpoch+1 && uint64(startSlot)%slotsPerEpoch == 0) {
		return committeeWeight * slotsPerEpoch
	}

	slotsInStartEpoch := slotsPerEpoch - (uint64(startSlot) % slotsPerEpoch)
	slotsInEndEpoch := (uint64(endSlot) % slotsPerEpoch) + 1

	weightEnd := committeeWeight * slotsInEndEpoch
	weightStart := (committeeWeight * slotsInStartEpoch * (slotsPerEpoch - slotsInEndEpoch)) / slotsPerEpoch

	return weightEnd + weightStart
}

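// Worked example (matching the "partial overlap between epochs" case in the tests of this
// comparison, with 32 slots per epoch): startSlot = 30, endSlot = 33, committeeWeight = 4.
//
//	slotsInStartEpoch = 32 - 30%32      = 2
//	slotsInEndEpoch   = 33%32 + 1       = 2
//	weightEnd         = 4 * 2           = 8
//	weightStart       = 4*2*(32-2) / 32 = 7 (integer division)
//	maxWeight         = 8 + 7           = 15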
|
||||
// updateBestDescendant updates the best descendant of this node and its
|
||||
// children.
|
||||
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
|
||||
func (n *Node) updateBestDescendant(ctx context.Context, args *updateDescendantArgs) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if len(n.children) == 0 {
|
||||
n.bestDescendant = nil
|
||||
n.bestConfirmedDescendant = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -55,10 +105,11 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
|
||||
if child == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not update best descendant")
|
||||
}
|
||||
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
if err := child.updateBestDescendant(ctx, args); err != nil {
|
||||
return err
|
||||
}
|
||||
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
|
||||
currentEpoch := slots.ToEpoch(args.currentSlot)
|
||||
childLeadsToViableHead := child.leadsToViableHead(args.justifiedEpoch, currentEpoch)
|
||||
if childLeadsToViableHead && !hasViableDescendant {
|
||||
// The child leads to a viable head, but the current
|
||||
// parent's best child doesn't.
|
||||
@@ -79,13 +130,33 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
|
||||
}
|
||||
}
|
||||
if hasViableDescendant {
|
||||
// This node has a viable descendant.
|
||||
if bestChild.bestDescendant == nil {
|
||||
// The best descendant is the best child.
|
||||
n.bestDescendant = bestChild
|
||||
} else {
|
||||
// The best descendant is more than 1 hop away.
|
||||
n.bestDescendant = bestChild.bestDescendant
|
||||
}
|
||||
|
||||
if uint64(args.secondsSinceSlotStart.Seconds()) < params.BeaconConfig().SecondsPerSlot/params.BeaconConfig().IntervalsPerSlot {
|
||||
prevSlot := primitives.Slot(0)
|
||||
if args.currentSlot > 1 {
|
||||
prevSlot = args.currentSlot - 1
|
||||
}
|
||||
|
||||
if bestChild.confirmed(prevSlot, args.committeeWeight, args.pbRoot, args.pbValue) {
|
||||
n.bestConfirmedDescendant = bestChild.bestConfirmedDescendant
|
||||
if n.bestConfirmedDescendant == nil {
|
||||
n.bestConfirmedDescendant = bestChild
|
||||
}
|
||||
} else {
|
||||
n.bestConfirmedDescendant = nil
|
||||
}
|
||||
}
|
||||
} else {
|
||||
n.bestDescendant = nil
|
||||
n.bestConfirmedDescendant = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
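// A minimal sketch (helper name hypothetical) of the gate used above: the best confirmed
// descendant is only re-evaluated during the first interval of the slot. With mainnet
// parameters that is 12 / 3 = 4 seconds after the slot start.
func inFirstInterval(sinceSlotStart time.Duration) bool {
	cfg := params.BeaconConfig()
	return uint64(sinceSlotStart.Seconds()) < cfg.SecondsPerSlot/cfg.IntervalsPerSlot
}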
@@ -192,3 +263,38 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// confirmed returns true if the node satisfies the confirmation rule.
func (n *Node) confirmed(slot primitives.Slot, committeeWeight uint64, pbRoot [32]byte, pbValue uint64) bool {
	if n.slot > slot {
		return false
	}

	pbWeight := committeeWeight * params.BeaconConfig().ProposerScoreBoost / 100
	maxWeight := n.maxWeight(slot, committeeWeight)
	byzantineWeight := maxWeight * params.BeaconConfig().FastConfirmationByzantineThreshold / 100
	threshold := (maxWeight+pbWeight)/2 + byzantineWeight

	nodeWeight := n.weight

	var pbWeightSubtracted bool
	if n.root == pbRoot || (n.bestDescendant != nil && n.bestDescendant.root == pbRoot) {
		if nodeWeight < pbValue {
			return false
		}
		nodeWeight -= pbValue
		pbWeightSubtracted = true
	}

	log.WithFields(logrus.Fields{
		"slot":               slot,
		"nodeSlot":           n.slot,
		"committeeWeight":    committeeWeight,
		"maxWeight":          maxWeight,
		"nodeWeight":         nodeWeight,
		"threshold":          threshold,
		"pbWeightSubtracted": pbWeightSubtracted,
	}).Info("Checking confirmation")

	return nodeWeight > threshold
}
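// Worked example (matching the unit tests in this comparison, with committeeWeight = 100,
// a proposer score boost of 40% and a Byzantine threshold of 33%), for a node covering
// slots 0..1:
//
//	maxWeight       = 100 * 2           = 200
//	pbWeight        = 100 * 40 / 100    = 40
//	byzantineWeight = 200 * 33 / 100    = 66
//	threshold       = (200 + 40)/2 + 66 = 186
//
// so the node is confirmed only once its (boost-adjusted) weight exceeds 186.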
|
||||
@@ -1,6 +1,7 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -110,12 +111,60 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].weight = 100
|
||||
s.nodeByRoot[indexToHash(2)].weight = 200
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
|
||||
justifiedEpoch: 1,
|
||||
finalizedEpoch: 1,
|
||||
currentSlot: 2,
|
||||
secondsSinceSlotStart: 0,
|
||||
committeeWeight: f.store.committeeWeight,
|
||||
}))
|
||||
|
||||
assert.Equal(t, 2, len(s.treeRootNode.children))
|
||||
assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
|
||||
}
|
||||
|
||||
func TestNode_UpdateBestDescendant_BestConfirmedDescendant(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
// Insert first child node
|
||||
state1, blk1, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state1, blk1))
|
||||
|
||||
// Insert second child node
|
||||
state2, blk2, err := prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state2, blk2))
|
||||
|
||||
s := f.store
|
||||
|
||||
// Set weights manually to control the confirmation logic
|
||||
node1 := s.nodeByRoot[indexToHash(1)]
|
||||
node2 := s.nodeByRoot[indexToHash(2)]
|
||||
|
||||
node1.weight = 100
|
||||
node2.weight = 200
|
||||
|
||||
// Execute update
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
|
||||
justifiedEpoch: 1,
|
||||
finalizedEpoch: 1,
|
||||
currentSlot: 3,
|
||||
secondsSinceSlotStart: 0,
|
||||
committeeWeight: f.store.committeeWeight,
|
||||
}))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert the correct bestConfirmedDescendant is selected
|
||||
assert.NotNil(t, s.treeRootNode.bestConfirmedDescendant, "expected bestConfirmedDescendant to be set")
|
||||
assert.Equal(t, node2, s.treeRootNode.bestConfirmedDescendant, "expected node2 to be the bestConfirmedDescendant")
|
||||
|
||||
// Additional: verify that the best descendant logic is consistent
|
||||
assert.Equal(t, node2, s.treeRootNode.bestDescendant, "expected node2 to be the bestDescendant")
|
||||
}
|
||||
|
||||
func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := t.Context()
|
||||
@@ -130,7 +179,13 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].weight = 200
|
||||
s.nodeByRoot[indexToHash(2)].weight = 100
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
|
||||
justifiedEpoch: 1,
|
||||
finalizedEpoch: 1,
|
||||
currentSlot: 2,
|
||||
secondsSinceSlotStart: 0,
|
||||
committeeWeight: f.store.committeeWeight,
|
||||
}))
|
||||
|
||||
assert.Equal(t, 2, len(s.treeRootNode.children))
|
||||
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
|
||||
@@ -327,3 +382,238 @@ func TestNode_TimeStampsChecks(t *testing.T) {
|
||||
require.ErrorContains(t, "invalid timestamp", err)
|
||||
require.Equal(t, false, late)
|
||||
}
|
||||
|
||||
func TestNode_maxWeight(t *testing.T) {
|
||||
type fields struct {
|
||||
slot primitives.Slot
|
||||
parent *Node
|
||||
}
|
||||
type args struct {
|
||||
endSlot primitives.Slot
|
||||
committeeWeight uint64
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want uint64
|
||||
}{
|
||||
{
|
||||
name: "startSlot > endSlot, should return 0",
|
||||
fields: fields{
|
||||
parent: &Node{slot: 9},
|
||||
},
|
||||
args: args{
|
||||
endSlot: 9,
|
||||
},
|
||||
want: 0,
|
||||
},
|
||||
{
|
||||
name: "startEpoch == currentEpoch",
|
||||
fields: fields{
|
||||
parent: &Node{slot: 4},
|
||||
},
|
||||
args: args{
|
||||
endSlot: 7,
|
||||
committeeWeight: 10,
|
||||
},
|
||||
want: 30, // (7 - 5 + 1) * 10 = 30
|
||||
},
|
||||
{
|
||||
name: "currentEpoch > startEpoch + 1",
|
||||
fields: fields{
|
||||
slot: 0,
|
||||
},
|
||||
args: args{
|
||||
endSlot: 32,
|
||||
committeeWeight: 10,
|
||||
},
|
||||
want: 320, // slotsPerEpoch * committeeWeight
|
||||
},
|
||||
{
|
||||
name: "currentEpoch == startEpoch+1 && startSlot % slotsPerEpoch == 0",
|
||||
fields: fields{
|
||||
parent: &Node{
|
||||
slot: 31,
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
endSlot: 64,
|
||||
committeeWeight: 5,
|
||||
},
|
||||
want: 160, // slotsPerEpoch * committeeWeight
|
||||
},
|
||||
{
|
||||
name: "partial overlap between epochs",
|
||||
fields: fields{
|
||||
slot: 30,
|
||||
},
|
||||
args: args{
|
||||
endSlot: 33,
|
||||
committeeWeight: 4,
|
||||
},
|
||||
want: func() uint64 {
|
||||
startSlot := uint64(30)
|
||||
currentSlot := uint64(33)
|
||||
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
slotsInStartEpoch := slotsPerEpoch - (startSlot % slotsPerEpoch) // 32 - 30 = 2
|
||||
slotsInCurrentEpoch := (currentSlot % slotsPerEpoch) + 1 // 33 % 32 + 1 = 2
|
||||
|
||||
weightStart := (4 * slotsInStartEpoch * (slotsPerEpoch - slotsInCurrentEpoch)) / slotsPerEpoch // 4 * 2 * 30 / 32 = 7 (int division)
|
||||
weightCurrent := 4 * slotsInCurrentEpoch // 4 * 2 = 8
|
||||
return weightStart + weightCurrent // 7 + 8 = 15
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
n := &Node{
|
||||
slot: tt.fields.slot,
|
||||
parent: tt.fields.parent,
|
||||
}
|
||||
if got := n.maxWeight(tt.args.endSlot, tt.args.committeeWeight); got != tt.want {
|
||||
t.Errorf("maxWeight() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNode_confirmed(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.FastConfirmationByzantineThreshold = 33
|
||||
undo, err := params.SetActiveWithUndo(cfg)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
require.NoError(t, undo())
|
||||
}()
|
||||
|
||||
type fields struct {
|
||||
nodeSlot primitives.Slot
|
||||
weight uint64
|
||||
root [32]byte
|
||||
bestDescendant *Node
|
||||
}
|
||||
type args struct {
|
||||
slot primitives.Slot
|
||||
committeeWeight uint64
|
||||
pbRoot [32]byte
|
||||
pbValue uint64
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "node slot > slot returns false",
|
||||
fields: fields{
|
||||
nodeSlot: 10,
|
||||
},
|
||||
args: args{
|
||||
slot: 9,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "weight without boost <= threshold returns false",
|
||||
fields: fields{
|
||||
weight: 186, // 200 committee weight, 40 pb weight, 66 byzantine weight
|
||||
bestDescendant: &Node{},
|
||||
},
|
||||
args: args{
|
||||
slot: 1,
|
||||
committeeWeight: 100,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "weight without boost > threshold returns true",
|
||||
fields: fields{
|
||||
weight: 187, // 200 committee weight, 40 pb weight, 66 byzantine weight
|
||||
bestDescendant: &Node{},
|
||||
},
|
||||
args: args{
|
||||
slot: 1,
|
||||
committeeWeight: 100,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "node root matches pbRoot but balance < pbValue returns false",
|
||||
fields: fields{
|
||||
weight: 187, bestDescendant: &Node{
|
||||
root: [32]byte{1},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
slot: 1,
|
||||
committeeWeight: 100,
|
||||
pbRoot: [32]byte{1},
|
||||
pbValue: 100000000,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "node root matches pbRoot, balance >= pbValue, adjusted weight <= threshold returns false",
|
||||
fields: fields{
|
||||
weight: 187,
|
||||
bestDescendant: &Node{
|
||||
root: [32]byte{1},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
slot: 1,
|
||||
committeeWeight: 100,
|
||||
pbRoot: [32]byte{1},
|
||||
pbValue: 1,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "node root matches pbRoot (self), balance >= pbValue, adjusted weight <= threshold returns false",
|
||||
fields: fields{
|
||||
weight: 187,
|
||||
root: [32]byte{1},
|
||||
},
|
||||
args: args{
|
||||
slot: 1,
|
||||
committeeWeight: 100,
|
||||
pbRoot: [32]byte{1},
|
||||
pbValue: 1,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "node root matches pbRoot, balance >= pbValue, adjusted weight > threshold returns true",
|
||||
fields: fields{
|
||||
weight: 188,
|
||||
bestDescendant: &Node{
|
||||
root: [32]byte{1},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
slot: 1,
|
||||
committeeWeight: 100,
|
||||
pbRoot: [32]byte{1},
|
||||
pbValue: 1,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
n := &Node{
|
||||
slot: tt.fields.nodeSlot,
|
||||
weight: tt.fields.weight,
|
||||
root: tt.fields.root,
|
||||
bestDescendant: tt.fields.bestDescendant,
|
||||
}
|
||||
if got := n.confirmed(tt.args.slot, tt.args.committeeWeight, tt.args.pbRoot, tt.args.pbValue); got != tt.want {
|
||||
t.Errorf("confirmed() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -147,6 +147,11 @@ func (f *ForkChoice) GetProposerHead() [32]byte {
|
||||
return head.root
|
||||
}
|
||||
|
||||
// Only orphan a block if the parent LMD vote is strong
|
||||
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
|
||||
return head.root
|
||||
}
|
||||
|
||||
// Only reorg if we are proposing early
|
||||
sss, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
|
||||
if err != nil {
|
||||
@@ -156,23 +161,5 @@ func (f *ForkChoice) GetProposerHead() [32]byte {
|
||||
if sss >= orphanLateBlockProposingEarly*time.Second {
|
||||
return head.root
|
||||
}
|
||||
|
||||
// Newly added in EIP-7805
|
||||
// reorg_prerequisites = all([shuffling_stable, ffg_competitive, finalization_ok,
|
||||
// proposing_on_time, single_slot_reorg, head_weak, parent_strong])
|
||||
//
|
||||
// # Check that the head block is in the unsatisfied inclusion list blocks
|
||||
// inclusion_list_not_satisfied = head_root in store.unsatisfied_inclusion_list_blocks # [New in EIP-7805]
|
||||
//
|
||||
// if reorg_prerequisites and (head_late or inclusion_list_not_satisfied):
|
||||
// return parent_root
|
||||
// else:
|
||||
// return head_root
|
||||
|
||||
// Only orphan a block if the parent LMD vote is strong and satisfies inclusion list
|
||||
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold && !head.notSatisfyingInclusionList {
|
||||
return head.root
|
||||
}
|
||||
|
||||
return parent.root
|
||||
}
|
||||
|
||||
@@ -62,6 +62,39 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
return bestDescendant.root, nil
|
||||
}
|
||||
|
||||
// safeHead starts from the justified root and then follows the best confirmed descendant
// link to find the safe head block.
func (s *Store) safeHead(ctx context.Context) ([32]byte, error) {
	ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.safeHead")
	defer span.End()

	if err := ctx.Err(); err != nil {
		return [32]byte{}, err
	}

	// JustifiedRoot has to be known.
	justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
	if !ok || justifiedNode == nil {
		// If the justifiedCheckpoint is from genesis, then the root is
		// the zero hash. In this case it should be the root of the
		// forkchoice tree.
		if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
			justifiedNode = s.treeRootNode
		} else {
			return [32]byte{}, errors.WithMessage(errUnknownJustifiedRoot, fmt.Sprintf("%#x", s.justifiedCheckpoint.Root))
		}
	}

	// If the justified node doesn't have a best confirmed descendant,
	// the safe head is the justified node itself.
	bestConfirmedDescendant := justifiedNode.bestConfirmedDescendant
	if bestConfirmedDescendant == nil {
		bestConfirmedDescendant = justifiedNode
	}

	return bestConfirmedDescendant.root, nil
}

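// Illustrative usage (a sketch based on how TestForkChoiceSafeHead in this comparison
// exercises it): the safe head is refreshed as part of the regular head computation, and
// callers then read store.safeHeadRoot (or ForkChoice.SafeBlockHash above) rather than
// calling safeHead directly.
//
//	if _, err := f.Head(ctx); err != nil {
//		return err
//	}
//	safeRoot := f.store.safeHeadRoot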
// insert registers a new block node to the fork choice store's node list.
|
||||
// It then updates the new node's parent with the best child and descendant node.
|
||||
func (s *Store) insert(ctx context.Context,
|
||||
@@ -90,17 +123,16 @@ func (s *Store) insert(ctx context.Context,
|
||||
|
||||
parent := s.nodeByRoot[parentRoot]
|
||||
n := &Node{
|
||||
slot: slot,
|
||||
root: root,
|
||||
parent: parent,
|
||||
justifiedEpoch: justifiedEpoch,
|
||||
unrealizedJustifiedEpoch: justifiedEpoch,
|
||||
finalizedEpoch: finalizedEpoch,
|
||||
unrealizedFinalizedEpoch: finalizedEpoch,
|
||||
optimistic: true,
|
||||
payloadHash: payloadHash,
|
||||
timestamp: time.Now(),
|
||||
notSatisfyingInclusionList: roblock.Block().NotSatisfyingInclusionList(),
|
||||
slot: slot,
|
||||
root: root,
|
||||
parent: parent,
|
||||
justifiedEpoch: justifiedEpoch,
|
||||
unrealizedJustifiedEpoch: justifiedEpoch,
|
||||
finalizedEpoch: finalizedEpoch,
|
||||
unrealizedFinalizedEpoch: finalizedEpoch,
|
||||
optimistic: true,
|
||||
payloadHash: payloadHash,
|
||||
timestamp: time.Now(),
|
||||
}
|
||||
|
||||
// Set the node's target checkpoint
|
||||
@@ -147,7 +179,18 @@ func (s *Store) insert(ctx context.Context,
|
||||
// Update best descendants
|
||||
jEpoch := s.justifiedCheckpoint.Epoch
|
||||
fEpoch := s.finalizedCheckpoint.Epoch
|
||||
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
|
||||
secondsSinceSlotStart, err := slots.SinceSlotStart(currentSlot, s.genesisTime, time.Now())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not compute seconds since slot start")
|
||||
secondsSinceSlotStart = 0
|
||||
}
|
||||
if err := s.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
|
||||
justifiedEpoch: jEpoch,
|
||||
finalizedEpoch: fEpoch,
|
||||
currentSlot: currentSlot,
|
||||
secondsSinceSlotStart: secondsSinceSlotStart,
|
||||
committeeWeight: s.committeeWeight,
|
||||
}); err != nil {
|
||||
_, remErr := s.removeNode(ctx, n)
|
||||
if remErr != nil {
|
||||
log.WithError(remErr).Error("could not remove node")
|
||||
|
||||
@@ -630,3 +630,110 @@ func TestStore_HighestReceivedBlockDelay(t *testing.T) {
|
||||
|
||||
require.Equal(t, primitives.Slot(12), f.HighestReceivedBlockDelay())
|
||||
}
|
||||
|
||||
func TestStore_safeHead(t *testing.T) {
|
||||
genesisEpoch := params.BeaconConfig().GenesisEpoch
|
||||
root1 := [32]byte{0x01}
|
||||
root2 := [32]byte{0x02}
|
||||
root3 := [32]byte{0x03}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
setupStore func() *Store
|
||||
wantRoot [32]byte
|
||||
expectErr bool
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
name: "context cancelled returns error",
|
||||
setupStore: func() *Store {
|
||||
return &Store{}
|
||||
},
|
||||
wantRoot: [32]byte{},
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "justified root missing and not genesis returns error",
|
||||
setupStore: func() *Store {
|
||||
return &Store{
|
||||
justifiedCheckpoint: &forkchoicetypes.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root1,
|
||||
},
|
||||
nodeByRoot: make(map[[32]byte]*Node),
|
||||
}
|
||||
},
|
||||
wantRoot: [32]byte{},
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "justified is genesis, uses tree root",
|
||||
setupStore: func() *Store {
|
||||
return &Store{
|
||||
justifiedCheckpoint: &forkchoicetypes.Checkpoint{
|
||||
Epoch: genesisEpoch,
|
||||
Root: [32]byte{}, // zero hash
|
||||
},
|
||||
nodeByRoot: map[[32]byte]*Node{},
|
||||
treeRootNode: &Node{root: root2},
|
||||
}
|
||||
},
|
||||
wantRoot: root2,
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "justified exists with no best confirmed descendant",
|
||||
setupStore: func() *Store {
|
||||
node := &Node{root: root1}
|
||||
return &Store{
|
||||
justifiedCheckpoint: &forkchoicetypes.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root1,
|
||||
},
|
||||
nodeByRoot: map[[32]byte]*Node{
|
||||
root1: node,
|
||||
},
|
||||
}
|
||||
},
|
||||
wantRoot: root1,
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "justified exists with best confirmed descendant",
|
||||
setupStore: func() *Store {
|
||||
descendant := &Node{root: root3}
|
||||
node := &Node{root: root1, bestConfirmedDescendant: descendant}
|
||||
return &Store{
|
||||
justifiedCheckpoint: &forkchoicetypes.Checkpoint{
|
||||
Epoch: 2,
|
||||
Root: root1,
|
||||
},
|
||||
nodeByRoot: map[[32]byte]*Node{
|
||||
root1: node,
|
||||
},
|
||||
}
|
||||
},
|
||||
wantRoot: root3,
|
||||
expectErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
store := tt.setupStore()
|
||||
ctx := context.Background()
|
||||
if tt.name == "context cancelled returns error" {
|
||||
c, cancel := context.WithCancel(ctx)
|
||||
cancel()
|
||||
ctx = c
|
||||
}
|
||||
got, err := store.safeHead(ctx)
|
||||
if (err != nil) != tt.expectErr {
|
||||
t.Fatalf("expected error: %v, got: %v", tt.expectErr, err)
|
||||
}
|
||||
if err == nil && got != tt.wantRoot {
|
||||
t.Errorf("safeHead() = %x, want %x", got, tt.wantRoot)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ type Store struct {
|
||||
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
|
||||
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
|
||||
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
|
||||
safeHeadRoot [fieldparams.RootLength]byte // safe head root in store.
|
||||
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostScore uint64 // previous proposer boosted root score.
|
||||
@@ -47,22 +48,22 @@ type Store struct {
|
||||
// Node defines the individual block which includes its block parent, ancestor and how much weight accounted for it.
|
||||
// This is used as an array based stateful DAG for efficient fork choice look up.
|
||||
type Node struct {
|
||||
slot primitives.Slot // slot of the block converted to the node.
|
||||
root [fieldparams.RootLength]byte // root of the block converted to the node.
|
||||
payloadHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
|
||||
parent *Node // parent index of this node.
|
||||
target *Node // target checkpoint for
|
||||
children []*Node // the list of direct children of this Node
|
||||
justifiedEpoch primitives.Epoch // justifiedEpoch of this node.
|
||||
unrealizedJustifiedEpoch primitives.Epoch // the epoch that would be justified if the block would be advanced to the next epoch.
|
||||
finalizedEpoch primitives.Epoch // finalizedEpoch of this node.
|
||||
unrealizedFinalizedEpoch primitives.Epoch // the epoch that would be finalized if the block would be advanced to the next epoch.
|
||||
balance uint64 // the balance that voted for this node directly
|
||||
weight uint64 // weight of this node: the total balance including children
|
||||
bestDescendant *Node // bestDescendant node of this node.
|
||||
optimistic bool // whether the block has been fully validated or not
|
||||
notSatisfyingInclusionList bool // whether the node is not satisfying the inclusion list
|
||||
timestamp time.Time // The timestamp when the node was inserted.
|
||||
slot primitives.Slot // slot of the block converted to the node.
|
||||
root [fieldparams.RootLength]byte // root of the block converted to the node.
|
||||
payloadHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
|
||||
parent *Node // parent index of this node.
|
||||
target *Node // target checkpoint for
|
||||
children []*Node // the list of direct children of this Node
|
||||
justifiedEpoch primitives.Epoch // justifiedEpoch of this node.
|
||||
unrealizedJustifiedEpoch primitives.Epoch // the epoch that would be justified if the block would be advanced to the next epoch.
|
||||
finalizedEpoch primitives.Epoch // finalizedEpoch of this node.
|
||||
unrealizedFinalizedEpoch primitives.Epoch // the epoch that would be finalized if the block would be advanced to the next epoch.
|
||||
balance uint64 // the balance that voted for this node directly
|
||||
weight uint64 // weight of this node: the total balance including children
|
||||
bestDescendant *Node // bestDescendant node of this node.
|
||||
optimistic bool // whether the block has been fully validated or not
|
||||
timestamp time.Time // The timestamp when the node was inserted.
|
||||
bestConfirmedDescendant *Node // bestConfirmedDescendant node of this node.
|
||||
}
|
||||
|
||||
// Vote defines an individual validator's vote.
|
||||
|
||||
@@ -38,7 +38,6 @@ type RLocker interface {
|
||||
type HeadRetriever interface {
|
||||
Head(context.Context) ([32]byte, error)
|
||||
GetProposerHead() [32]byte
|
||||
GetAttesterHead() [32]byte
|
||||
CachedHeadRoot() [32]byte
|
||||
}
|
||||
|
||||
@@ -84,6 +83,7 @@ type FastGetter interface {
|
||||
DependentRoot(primitives.Epoch) ([32]byte, error)
|
||||
TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
|
||||
UnrealizedJustifiedPayloadBlockHash() [32]byte
|
||||
SafeBlockHash() [32]byte
|
||||
Weight(root [32]byte) (uint64, error)
|
||||
ParentRoot(root [32]byte) ([32]byte, error)
|
||||
}
|
||||
|
||||
@@ -100,6 +100,13 @@ func (ro *ROForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
|
||||
return ro.getter.UnrealizedJustifiedPayloadBlockHash()
|
||||
}
|
||||
|
||||
// SafeBlockHash delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) SafeBlockHash() [32]byte {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.SafeBlockHash()
|
||||
}
|
||||
|
||||
// NodeCount delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) NodeCount() int {
|
||||
ro.l.RLock()
|
||||
|
||||
@@ -27,6 +27,7 @@ const (
|
||||
previousJustifiedCheckpointCalled
|
||||
justifiedPayloadBlockHashCalled
|
||||
unrealizedJustifiedPayloadBlockHashCalled
|
||||
safeBlockHashCalled
|
||||
nodeCountCalled
|
||||
highestReceivedBlockSlotCalled
|
||||
highestReceivedBlockRootCalled
|
||||
@@ -107,6 +108,11 @@ func TestROLocking(t *testing.T) {
|
||||
call: unrealizedJustifiedPayloadBlockHashCalled,
|
||||
cb: func(g FastGetter) { g.UnrealizedJustifiedPayloadBlockHash() },
|
||||
},
|
||||
{
|
||||
name: "safeBlockHashCalled",
|
||||
call: safeBlockHashCalled,
|
||||
cb: func(g FastGetter) { g.SafeBlockHash() },
|
||||
},
|
||||
{
|
||||
name: "nodeCountCalled",
|
||||
call: nodeCountCalled,
|
||||
@@ -249,6 +255,11 @@ func (ro *mockROForkchoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) SafeBlockHash() [32]byte {
|
||||
ro.calls = append(ro.calls, safeBlockHashCalled)
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) NodeCount() int {
|
||||
ro.calls = append(ro.calls, nodeCountCalled)
|
||||
return 0
|
||||
|
||||
@@ -195,3 +195,32 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
|
||||
" Default fee recipient will be used as a fall back", checksumAddress.Hex())
|
||||
return params.SetActive(c)
|
||||
}
|
||||
|
||||
func configureSafeBlockConfig(cliCtx *cli.Context) error {
	c := params.BeaconConfig().Copy()

	if cliCtx.IsSet(flags.FastConfirmationByzantineThreshold.Name) {
		threshold := cliCtx.Uint64(flags.FastConfirmationByzantineThreshold.Name)
		if threshold > 100 {
			return fmt.Errorf("fast-confirmation-byzantine-threshold must be between 0 and 100")
		}
		c.FastConfirmationByzantineThreshold = threshold
		if err := params.SetActive(c); err != nil {
			return err
		}
	}

	if cliCtx.IsSet(flags.SafeBlock.Name) {
		safeBlock := cliCtx.String(flags.SafeBlock.Name)
		switch safeBlock {
		case "justified", "unrealized-justified", "fast-confirmation":
		default:
			return fmt.Errorf("invalid safe-block option: %s", safeBlock)
		}
		c.SafeBlockAlgorithm = safeBlock
		if err := params.SetActive(c); err != nil {
			return err
		}
	}
	return nil
}
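// Quick reference (a reading aid, not part of the diff; assumes the flag is registered as
// --safe-block): how the configured algorithm maps to the hash reported to the execution
// engine via ForkChoice.SafeBlockHash earlier in this comparison.
//
//	"justified"            -> JustifiedPayloadBlockHash()
//	"unrealized-justified" -> UnrealizedJustifiedPayloadBlockHash() (also the default)
//	"fast-confirmation"    -> payload hash of store.safeHeadRoot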
|
||||
@@ -129,7 +129,6 @@ type BeaconNode struct {
|
||||
syncChecker *initialsync.SyncChecker
|
||||
slasherEnabled bool
|
||||
lcStore *lightclient.Store
|
||||
inclusionLists *cache.InclusionLists
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -162,7 +161,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
blsToExecPool: blstoexec.NewPool(),
|
||||
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
|
||||
payloadIDCache: cache.NewPayloadIDCache(),
|
||||
inclusionLists: cache.NewInclusionLists(),
|
||||
slasherBlockHeadersFeed: new(event.Feed),
|
||||
slasherAttestationsFeed: new(event.Feed),
|
||||
serviceFlagOpts: &serviceFlagOpts{},
|
||||
@@ -291,6 +289,10 @@ func configureBeacon(cliCtx *cli.Context) error {
|
||||
return errors.Wrap(err, "could not configure execution setting")
|
||||
}
|
||||
|
||||
if err := configureSafeBlockConfig(cliCtx); err != nil {
|
||||
return errors.Wrap(err, "could not configure safe block config")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -804,7 +806,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithCustodyInfo(b.custodyInfo),
|
||||
blockchain.WithSlasherEnabled(b.slasherEnabled),
|
||||
blockchain.WithLightClientStore(b.lcStore),
|
||||
blockchain.WithInclusionListCache(b.inclusionLists),
|
||||
)
|
||||
|
||||
blockchainService, err := blockchain.NewService(b.ctx, opts...)
|
||||
@@ -894,7 +895,6 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithSlasherEnabled(b.slasherEnabled),
|
||||
regularsync.WithLightClientStore(b.lcStore),
|
||||
regularsync.WithBatchVerifierLimit(b.cliCtx.Int(flags.BatchVerifierLimit.Name)),
|
||||
regularsync.WithInclusionListsCache(b.inclusionLists),
|
||||
)
|
||||
return b.services.RegisterService(rs)
|
||||
}
|
||||
@@ -1042,7 +1042,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
TrackedValidatorsCache: b.trackedValidatorsCache,
|
||||
PayloadIDCache: b.payloadIDCache,
|
||||
LCStore: b.lcStore,
|
||||
InclusionListsCache: b.inclusionLists,
|
||||
})
|
||||
|
||||
return b.services.RegisterService(rpcService)
|
||||
|
||||
@@ -134,9 +134,6 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
|
||||
return defaultLightClientOptimisticUpdateTopicParams(), nil
|
||||
case strings.Contains(topic, GossipLightClientFinalityUpdateMessage):
|
||||
return defaultLightClientFinalityUpdateTopicParams(), nil
|
||||
case strings.Contains(topic, GossipInclusionList):
|
||||
// TODO: Update this properly for inclusion list
|
||||
return defaultBlockTopicParams(), nil
|
||||
default:
|
||||
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
|
||||
}
|
||||
|
||||
@@ -25,7 +25,6 @@ var gossipTopicMappings = map[string]func() proto.Message{
|
||||
LightClientOptimisticUpdateTopicFormat: func() proto.Message { return ðpb.LightClientOptimisticUpdateAltair{} },
|
||||
LightClientFinalityUpdateTopicFormat: func() proto.Message { return ðpb.LightClientFinalityUpdateAltair{} },
|
||||
DataColumnSubnetTopicFormat: func() proto.Message { return ðpb.DataColumnSidecar{} },
|
||||
InclusionListTopicFormat: func() proto.Message { return ðpb.SignedInclusionList{} },
|
||||
}
|
||||
|
||||
// GossipTopicMappings is a function to return the assigned data type
|
||||
@@ -145,7 +144,4 @@ func init() {
|
||||
|
||||
// Specially handle Fulu objects.
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlockFulu{})] = BlockSubnetTopicFormat
|
||||
|
||||
// Specially handle InclusionList objects.
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedInclusionList{})] = InclusionListTopicFormat
|
||||
}
|
||||
|
||||
@@ -21,9 +21,9 @@ import (
|
||||
|
||||
const (
|
||||
// overlay parameters
|
||||
gossipSubD = 3 // topic stable mesh target count
|
||||
gossipSubDlo = 2 // topic stable mesh low watermark
|
||||
gossipSubDhi = 4 // topic stable mesh high watermark
|
||||
gossipSubD = 8 // topic stable mesh target count
|
||||
gossipSubDlo = 6 // topic stable mesh low watermark
|
||||
gossipSubDhi = 12 // topic stable mesh high watermark
|
||||
|
||||
// gossip parameters
|
||||
gossipSubMcacheLen = 6 // number of windows to retain full messages in cache for `IWANT` responses
|
||||
|
||||
@@ -36,7 +36,6 @@ const (
|
||||
GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
|
||||
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
|
||||
GossipDataColumnSidecarMessage = "data_column_sidecar"
|
||||
GossipInclusionList = "inclusion_list"
|
||||
|
||||
// Topic Formats
|
||||
//
|
||||
@@ -66,5 +65,4 @@ const (
|
||||
LightClientOptimisticUpdateTopicFormat = GossipProtocolAndDigest + GossipLightClientOptimisticUpdateMessage
|
||||
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
|
||||
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
|
||||
InclusionListTopicFormat = GossipProtocolAndDigest + GossipInclusionList
|
||||
)
|
||||
|
||||
@@ -550,10 +550,12 @@ func (s *Service) GetAttestationData(
|
||||
return nil, &RpcError{Reason: Unavailable, Err: errOptimisticMode}
|
||||
}
|
||||
|
||||
headRoot := s.ChainInfoFetcher.GetAttesterHead() // Attesters vote based on IL constrained head root.
|
||||
|
||||
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not get head root")}
|
||||
}
|
||||
targetEpoch := slots.ToEpoch(req.Slot)
|
||||
targetRoot, err := s.HeadFetcher.TargetRootForEpoch(headRoot, targetEpoch)
|
||||
targetRoot, err := s.HeadFetcher.TargetRootForEpoch(bytesutil.ToBytes32(headRoot), targetEpoch)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not get target root")}
|
||||
}
|
||||
@@ -563,7 +565,7 @@ func (s *Service) GetAttestationData(
|
||||
return nil, &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not get head state")}
|
||||
}
|
||||
if coreTime.CurrentEpoch(headState) < slots.ToEpoch(req.Slot) { // Ensure justified checkpoint safety by processing head state across the boundary.
|
||||
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot[:], req.Slot)
|
||||
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, req.Slot)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not process slots up to %d: %v", req.Slot, err)}
|
||||
}
|
||||
@@ -572,7 +574,7 @@ func (s *Service) GetAttestationData(
|
||||
|
||||
if err = s.AttestationCache.Put(&cache.AttestationConsensusData{
|
||||
Slot: req.Slot,
|
||||
HeadRoot: headRoot[:],
|
||||
HeadRoot: headRoot,
|
||||
Target: forkchoicetypes.Checkpoint{
|
||||
Epoch: targetEpoch,
|
||||
Root: targetRoot,
|
||||
@@ -588,7 +590,7 @@ func (s *Service) GetAttestationData(
|
||||
return ðpb.AttestationData{
|
||||
Slot: req.Slot,
|
||||
CommitteeIndex: committeeIndex,
|
||||
BeaconBlockRoot: headRoot[:],
|
||||
BeaconBlockRoot: headRoot,
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: justifiedCheckpoint.Epoch,
|
||||
Root: justifiedCheckpoint.Root,
|
||||
|
||||
@@ -577,12 +577,6 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "102", v)
|
||||
case "BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":
|
||||
assert.Equal(t, "103", v)
|
||||
case "DOMAIN_INCLUSION_LIST_COMMITTEE":
|
||||
assert.Equal(t, "0x00000000", v)
|
||||
case "EIP7805_FORK_VERSION":
|
||||
assert.Equal(t, "0x0a000000", v)
|
||||
case "EIP7805_FORK_EPOCH":
|
||||
assert.Equal(t, "18446744073709551615", v)
|
||||
default:
|
||||
t.Errorf("Incorrect key: %s", k)
|
||||
}
|
||||
|
||||
@@ -201,6 +201,7 @@ func (s *Server) GetForkChoice(w http.ResponseWriter, r *http.Request) {
|
||||
ExtraData: &structs.ForkChoiceDumpExtraData{
|
||||
UnrealizedJustifiedCheckpoint: structs.CheckpointFromConsensus(dump.UnrealizedJustifiedCheckpoint),
|
||||
UnrealizedFinalizedCheckpoint: structs.CheckpointFromConsensus(dump.UnrealizedFinalizedCheckpoint),
|
||||
SafeHeadRoot: hexutil.Encode(dump.SafeHeadRoot),
|
||||
ProposerBoostRoot: hexutil.Encode(dump.ProposerBoostRoot),
|
||||
PreviousProposerBoostRoot: hexutil.Encode(dump.PreviousProposerBoostRoot),
|
||||
HeadRoot: hexutil.Encode(dump.HeadRoot),
|
||||
|
||||
@@ -64,8 +64,6 @@ const (
|
||||
PayloadAttributesTopic = "payload_attributes"
|
||||
// BlobSidecarTopic represents a new blob sidecar event topic
|
||||
BlobSidecarTopic = "blob_sidecar"
|
||||
// InclusionListTopic represents a new inclusion list event topic
|
||||
InclusionListTopic = "inclusion_list"
|
||||
// ProposerSlashingTopic represents a new proposer slashing event topic
|
||||
ProposerSlashingTopic = "proposer_slashing"
|
||||
// AttesterSlashingTopic represents a new attester slashing event topic
|
||||
@@ -106,7 +104,6 @@ var opsFeedEventTopics = map[feed.EventType]string{
|
||||
operation.SyncCommitteeContributionReceived: SyncCommitteeContributionTopic,
|
||||
operation.BLSToExecutionChangeReceived: BLSToExecutionChangeTopic,
|
||||
operation.BlobSidecarReceived: BlobSidecarTopic,
|
||||
operation.InclusionListReceived: InclusionListTopic,
|
||||
operation.AttesterSlashingReceived: AttesterSlashingTopic,
|
||||
operation.ProposerSlashingReceived: ProposerSlashingTopic,
|
||||
operation.BlockGossipReceived: BlockGossipTopic,
|
||||
@@ -447,8 +444,6 @@ func topicForEvent(event *feed.Event) string {
|
||||
return BLSToExecutionChangeTopic
|
||||
case *operation.BlobSidecarReceivedData:
|
||||
return BlobSidecarTopic
|
||||
case *operation.InclusionListReceivedData:
|
||||
return InclusionListTopic
|
||||
case *operation.AttesterSlashingReceivedData:
|
||||
return AttesterSlashingTopic
|
||||
case *operation.ProposerSlashingReceivedData:
|
||||
@@ -581,13 +576,6 @@ func (s *Server) lazyReaderForEvent(ctx context.Context, event *feed.Event, topi
|
||||
KzgCommitment: hexutil.Encode(v.Blob.KzgCommitment),
|
||||
})
|
||||
}, nil
|
||||
case *operation.InclusionListReceivedData:
|
||||
return func() io.Reader {
|
||||
return jsonMarshalReader(eventName, structs.InclusionListEvent{
|
||||
Version: "eip7805",
|
||||
Data: structs.SignedInclusionListFromConsensus(v.SignedInclusionList),
|
||||
})
|
||||
}, nil
|
||||
case *operation.AttesterSlashingReceivedData:
|
||||
switch slashing := v.AttesterSlashing.(type) {
|
||||
case *eth.AttesterSlashing:
|
||||
|
||||
@@ -118,7 +118,6 @@ func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
|
||||
SyncCommitteeContributionTopic,
|
||||
BLSToExecutionChangeTopic,
|
||||
BlobSidecarTopic,
|
||||
InclusionListTopic,
|
||||
AttesterSlashingTopic,
|
||||
ProposerSlashingTopic,
|
||||
BlockGossipTopic,
|
||||
@@ -208,20 +207,6 @@ func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
|
||||
Blob: &vblob,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: operation.InclusionListReceived,
|
||||
Data: &operation.InclusionListReceivedData{
|
||||
SignedInclusionList: ð.SignedInclusionList{
|
||||
Message: ð.InclusionList{
|
||||
Slot: 0,
|
||||
ValidatorIndex: 0,
|
||||
InclusionListCommitteeRoot: make([]byte, fieldparams.RootLength),
|
||||
Transactions: [][]byte{},
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: operation.AttesterSlashingReceived,
|
||||
Data: &operation.AttesterSlashingReceivedData{
|
||||
|
||||
@@ -1365,7 +1365,6 @@ func TestGetAttestationData(t *testing.T) {
|
||||
FinalizedFetcher: chain,
|
||||
AttestationCache: cache.NewAttestationDataCache(),
|
||||
OptimisticModeFetcher: chain,
|
||||
ChainInfoFetcher: chain,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1525,7 +1524,6 @@ func TestGetAttestationData(t *testing.T) {
|
||||
HeadFetcher: chain,
|
||||
FinalizedFetcher: chain,
|
||||
OptimisticModeFetcher: chain,
|
||||
ChainInfoFetcher: chain,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1685,7 +1683,6 @@ func TestGetAttestationData(t *testing.T) {
|
||||
HeadFetcher: chain,
|
||||
GenesisTimeFetcher: chain,
|
||||
FinalizedFetcher: chain,
|
||||
ChainInfoFetcher: chain,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1856,7 +1853,6 @@ func TestGetAttestationData(t *testing.T) {
|
||||
HeadFetcher: chain,
|
||||
GenesisTimeFetcher: chain,
|
||||
FinalizedFetcher: chain,
|
||||
ChainInfoFetcher: chain,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -11,7 +11,6 @@ go_library(
|
||||
"duties_v2.go",
|
||||
"exit.go",
|
||||
"log.go",
|
||||
"inclusion_list.go",
|
||||
"proposer.go",
|
||||
"proposer_altair.go",
|
||||
"proposer_attestations.go",
|
||||
|
||||
@@ -69,7 +69,6 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) {
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
ChainInfoFetcher: &mock.ChainService{TargetRoot: blockRoot, Root: blockRoot[:], State: beaconState},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -185,7 +185,6 @@ func TestGetAttestationData_OK(t *testing.T) {
|
||||
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
|
||||
AttestationCache: cache.NewAttestationDataCache(),
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
ChainInfoFetcher: &mock.ChainService{TargetRoot: targetRoot, Root: blockRoot[:], State: beaconState},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -315,7 +314,6 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
|
||||
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
|
||||
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: ðpb.Checkpoint{}},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
ChainInfoFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
|
||||
},
|
||||
}
|
||||
_, err = as.GetAttestationData(t.Context(), ðpb.AttestationDataRequest{})
|
||||
@@ -428,9 +426,6 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) {
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
ChainInfoFetcher: &mock.ChainService{
|
||||
TargetRoot: targetRoot, Root: blockRoot[:], State: beaconState,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -500,7 +495,6 @@ func TestGetAttestationData_CommitteeIndexIsZeroPostElectra(t *testing.T) {
|
||||
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
|
||||
AttestationCache: cache.NewAttestationDataCache(),
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
ChainInfoFetcher: &mock.ChainService{TargetRoot: targetRoot, Root: blockRoot[:], State: beaconState},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -113,7 +113,6 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
|
||||
assignment.Committee = ca.Committee
|
||||
assignment.AttesterSlot = ca.AttesterSlot
|
||||
assignment.CommitteeIndex = ca.CommitteeIndex
|
||||
assignment.InclusionListCommitteeSlot = ca.InclusionListCommitteeSlot
|
||||
}
|
||||
// Save the next epoch assignments.
|
||||
ca, ok = nextEpochAssignments[idx]
|
||||
@@ -121,7 +120,6 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
|
||||
nextAssignment.Committee = ca.Committee
|
||||
nextAssignment.AttesterSlot = ca.AttesterSlot
|
||||
nextAssignment.CommitteeIndex = ca.CommitteeIndex
|
||||
nextAssignment.InclusionListCommitteeSlot = ca.InclusionListCommitteeSlot
|
||||
}
|
||||
} else {
|
||||
// If the validator isn't in the beacon state, try finding their deposit to determine their status.
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
@@ -144,12 +143,11 @@ type dutiesMetadata struct {
|
||||
}
|
||||
|
||||
type metadata struct {
|
||||
committeesAtSlot uint64
|
||||
proposalSlots map[primitives.ValidatorIndex][]primitives.Slot
|
||||
startSlot primitives.Slot
|
||||
committeesBySlot [][][]primitives.ValidatorIndex
|
||||
liteAssignment *helpers.LiteAssignment
|
||||
inclusionListCommitteesBySlot [][]primitives.ValidatorIndex
|
||||
committeesAtSlot uint64
|
||||
proposalSlots map[primitives.ValidatorIndex][]primitives.Slot
|
||||
startSlot primitives.Slot
|
||||
committeesBySlot [][][]primitives.ValidatorIndex
|
||||
liteAssignment *helpers.LiteAssignment
|
||||
}
|
||||
|
||||
func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch) (*dutiesMetadata, error) {
|
||||
@@ -195,18 +193,6 @@ func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Compute inclusion list committees for each slot in the epoch
|
||||
meta.inclusionListCommitteesBySlot = make([][]primitives.ValidatorIndex, params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := primitives.Slot(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
slot := meta.startSlot + i
|
||||
inclusionListCommittee, err := helpers.GetInclusionListCommittee(ctx, s, slot)
|
||||
if err != nil {
|
||||
// Skip inclusion list committee computation if not supported for this slot
|
||||
continue
|
||||
}
|
||||
meta.inclusionListCommitteesBySlot[i] = inclusionListCommittee
|
||||
}
|
||||
|
||||
return meta, nil
|
||||
}
|
||||
|
||||
@@ -229,37 +215,11 @@ func (vs *Server) buildValidatorDuty(
|
||||
assignment.ProposerSlots = meta.current.proposalSlots[idx]
|
||||
populateCommitteeFields(assignment, meta.current.liteAssignment)
|
||||
|
||||
// Check for inclusion list committee assignment in current epoch
|
||||
for slotOffset, inclusionListCommittee := range meta.current.inclusionListCommitteesBySlot {
|
||||
for _, validatorIndex := range inclusionListCommittee {
|
||||
if validatorIndex == idx {
|
||||
assignment.InclusionListCommitteeSlot = meta.current.startSlot + primitives.Slot(slotOffset)
|
||||
break
|
||||
}
|
||||
}
|
||||
if assignment.InclusionListCommitteeSlot != 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
nextAssignment.ValidatorIndex = idx
|
||||
nextAssignment.Status = statusEnum
|
||||
nextAssignment.CommitteesAtSlot = meta.next.committeesAtSlot
|
||||
populateCommitteeFields(nextAssignment, meta.next.liteAssignment)
|
||||
|
||||
// Check for inclusion list committee assignment in next epoch
|
||||
for slotOffset, inclusionListCommittee := range meta.next.inclusionListCommitteesBySlot {
|
||||
for _, validatorIndex := range inclusionListCommittee {
|
||||
if validatorIndex == idx {
|
||||
nextAssignment.InclusionListCommitteeSlot = meta.next.startSlot + primitives.Slot(slotOffset)
|
||||
break
|
||||
}
|
||||
}
|
||||
if nextAssignment.InclusionListCommitteeSlot != 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Sync committee flags
|
||||
if coreTime.HigherEqualThanAltairVersionAndEpoch(s, reqEpoch) {
|
||||
inSync, err := helpers.IsCurrentPeriodSyncCommittee(s, idx)
|
||||
|
||||
@@ -1,79 +0,0 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/ssz"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
// GetInclusionList retrieves the inclusion list for the specified slot.
|
||||
// The slot must be the current or next slot. The inclusion list is built using
|
||||
// committee indices, the execution payload header from beacon state, and the transactions from the execution engine.
|
||||
func (vs *Server) GetInclusionList(ctx context.Context, request *ethpb.GetInclusionListRequest) (*ethpb.InclusionList, error) {
|
||||
currentSlot := vs.TimeFetcher.CurrentSlot()
|
||||
if request.Slot != currentSlot && request.Slot+1 != currentSlot {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "requested slot %d is not the current or previous slot", request.Slot)
|
||||
}
|
||||
|
||||
st, err := vs.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "failed to get head state: %v", err)
|
||||
}
|
||||
st, err = transition.ProcessSlotsIfPossible(ctx, st, request.Slot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "failed to process slots: %v", err)
|
||||
}
|
||||
indices, err := helpers.GetInclusionListCommittee(ctx, st, request.Slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root, err := ssz.InclusionListRoot(indices)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
header, err := st.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Fetch the transactions associated with the inclusion list.
|
||||
txs, err := vs.ExecutionEngineCaller.GetInclusionList(ctx, [32]byte(header.BlockHash()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ðpb.InclusionList{
|
||||
Slot: request.Slot,
|
||||
InclusionListCommitteeRoot: root[:],
|
||||
Transactions: txs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SubmitInclusionList broadcasts a signed inclusion list to the P2P network and caches it locally.
|
||||
func (vs *Server) SubmitInclusionList(ctx context.Context, il *ethpb.SignedInclusionList) (*emptypb.Empty, error) {
|
||||
slotStartTime := slots.UnsafeStartTime(vs.TimeFetcher.GenesisTime(), il.Message.Slot)
|
||||
currentTime := time.Now()
|
||||
isBeforeFreezeDeadline := vs.TimeFetcher.CurrentSlot() == il.Message.Slot &&
|
||||
currentTime.Sub(slotStartTime) < time.Duration(params.BeaconConfig().InclusionListFreezeDeadLine)*time.Second
|
||||
if !isBeforeFreezeDeadline {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "inclusion list submission is after freeze deadline")
|
||||
}
|
||||
|
||||
if err := vs.P2P.Broadcast(ctx, il); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vs.InclusionLists.Add(il.Message.Slot, il.Message.ValidatorIndex, il.Message.Transactions, isBeforeFreezeDeadline)
|
||||
|
||||
return &emptypb.Empty{}, nil
|
||||
}
|
||||
@@ -117,16 +117,16 @@ func (vs *Server) getLocalPayloadFromEngine(
|
||||
}
|
||||
|
||||
finalizedBlockHash := [32]byte{}
|
||||
justifiedBlockHash := [32]byte{}
|
||||
safeBlockHash := [32]byte{}
|
||||
// Blocks before Bellatrix don't have execution payloads. Use zeros as the hash.
|
||||
if st.Version() >= version.Bellatrix {
|
||||
finalizedBlockHash = vs.FinalizationFetcher.FinalizedBlockHash()
|
||||
justifiedBlockHash = vs.FinalizationFetcher.UnrealizedJustifiedPayloadBlockHash()
|
||||
safeBlockHash = vs.ForkchoiceFetcher.SafeBlockHash()
|
||||
}
|
||||
|
||||
f := &enginev1.ForkchoiceState{
|
||||
HeadBlockHash: parentHash,
|
||||
SafeBlockHash: justifiedBlockHash[:],
|
||||
SafeBlockHash: safeBlockHash[:],
|
||||
FinalizedBlockHash: finalizedBlockHash[:],
|
||||
}
|
||||
|
||||
|
||||
@@ -151,6 +151,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
|
||||
ExecutionEngineCaller: &powtesting.EngineClient{PayloadIDBytes: tt.payloadID, ErrForkchoiceUpdated: tt.forkchoiceErr, GetPayloadResponse: &blocks.GetPayloadResponse{ExecutionData: ed, OverrideBuilder: tt.override}},
|
||||
HeadFetcher: &chainMock.ChainService{State: tt.st},
|
||||
FinalizationFetcher: &chainMock.ChainService{},
|
||||
ForkchoiceFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: beaconDB,
|
||||
PayloadIDCache: cache.NewPayloadIDCache(),
|
||||
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
|
||||
@@ -252,6 +253,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{State: transitionSt},
|
||||
FinalizationFetcher: &chainMock.ChainService{},
|
||||
ForkchoiceFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: beaconDB,
|
||||
PayloadIDCache: cache.NewPayloadIDCache(),
|
||||
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
|
||||
|
||||
@@ -80,7 +80,6 @@ type Server struct {
|
||||
ClockWaiter startup.ClockWaiter
|
||||
CoreService *core.Service
|
||||
AttestationStateFetcher blockchain.AttestationStateFetcher
|
||||
InclusionLists *cache.InclusionLists
|
||||
}
|
||||
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will eventually be removed in favor of the REST API.
|
||||
|
||||
@@ -24,10 +24,13 @@ func (vs *Server) GetSyncMessageBlockRoot(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := vs.ForkchoiceFetcher.GetAttesterHead() // Sync committee vote based on IL constrained head root.
|
||||
r, err := vs.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve head root: %v", err)
|
||||
}
|
||||
|
||||
return ðpb.SyncMessageBlockRootResponse{
|
||||
Root: r[:],
|
||||
Root: r,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -77,22 +80,24 @@ func (vs *Server) GetSyncCommitteeContribution(
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get sync subcommittee messages: %v", err)
|
||||
}
|
||||
headRoot := vs.ForkchoiceFetcher.GetAttesterHead()
|
||||
|
||||
headRoot, err := vs.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get head root: %v", err)
|
||||
}
|
||||
sig, aggregatedBits, err := vs.CoreService.AggregatedSigAndAggregationBits(
|
||||
ctx,
|
||||
ðpb.AggregatedSigAndAggregationBitsRequest{
|
||||
Msgs: msgs,
|
||||
Slot: req.Slot,
|
||||
SubnetId: req.SubnetId,
|
||||
BlockRoot: headRoot[:],
|
||||
BlockRoot: headRoot,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get contribution data: %v", err)
|
||||
}
|
||||
contribution := ðpb.SyncCommitteeContribution{
|
||||
Slot: req.Slot,
|
||||
BlockRoot: headRoot[:],
|
||||
BlockRoot: headRoot,
|
||||
SubcommitteeIndex: req.SubnetId,
|
||||
AggregationBits: aggregatedBits,
|
||||
Signature: sig,
|
||||
|
||||
@@ -25,14 +25,14 @@ import (
|
||||
)
|
||||
|
||||
func TestGetSyncMessageBlockRoot_OK(t *testing.T) {
|
||||
r := [32]byte{'a'}
|
||||
r := []byte{'a'}
|
||||
server := &Server{
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
ForkchoiceFetcher: &mock.ChainService{Root: r[:]},
|
||||
HeadFetcher: &mock.ChainService{Root: r},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
}
|
||||
res, err := server.GetSyncMessageBlockRoot(t.Context(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, r[:], res.Root)
|
||||
require.DeepEqual(t, r, res.Root)
|
||||
}
|
||||
|
||||
func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) {
|
||||
@@ -42,7 +42,7 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) {
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
server := &Server{
|
||||
ForkchoiceFetcher: &mock.ChainService{},
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
|
||||
}
|
||||
@@ -53,7 +53,7 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) {
|
||||
require.ErrorContains(t, errOptimisticMode.Error(), err)
|
||||
|
||||
server = &Server{
|
||||
ForkchoiceFetcher: &mock.ChainService{},
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
}
|
||||
@@ -118,7 +118,6 @@ func TestGetSyncCommitteeContribution_FiltersDuplicates(t *testing.T) {
|
||||
HeadFetcher: headFetcher,
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
ForkchoiceFetcher: headFetcher,
|
||||
}
|
||||
secKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -123,7 +123,6 @@ type Config struct {
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
LCStore *lightClient.Store
|
||||
InclusionListsCache *cache.InclusionLists
|
||||
}
|
||||
|
||||
// NewService instantiates a new RPC service instance that will
|
||||
@@ -213,7 +212,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
FinalizedFetcher: s.cfg.FinalizationFetcher,
|
||||
ReplayerBuilder: ch,
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
|
||||
}
|
||||
validatorServer := &validatorv1alpha1.Server{
|
||||
Ctx: s.ctx,
|
||||
@@ -254,7 +252,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
TrackedValidatorsCache: s.cfg.TrackedValidatorsCache,
|
||||
PayloadIDCache: s.cfg.PayloadIDCache,
|
||||
AttestationStateFetcher: s.cfg.AttestationReceiver,
|
||||
InclusionLists: s.cfg.InclusionListsCache,
|
||||
}
|
||||
s.validatorServer = validatorServer
|
||||
nodeServer := &nodev1alpha1.Server{
|
||||
|
||||
@@ -14,7 +14,6 @@ go_library(
|
||||
"error.go",
|
||||
"fork_watcher.go",
|
||||
"fuzz_exports.go", # keep
|
||||
"inclusion_list.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"options.go",
|
||||
@@ -121,7 +120,6 @@ go_library(
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//encoding/ssz/equality:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//math:go_default_library",
|
||||
|
||||
@@ -1,180 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
|
||||
opfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/ssz"
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v6/time"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// validateInclusionList validates an incoming inclusion list message and returns a validation result
// based on the following rules:
//   - [REJECT] The slot `message.slot` is equal to the previous or current slot.
//   - [IGNORE] The slot `message.slot` is equal to the current slot, or it is equal to the previous slot
//     and the current time is less than attestation_deadline seconds into the slot.
//   - [IGNORE] The inclusion_list_committee for slot `message.slot` on the current branch corresponds to
//     `message.inclusion_list_committee_root`, as determined by
//     `hash_tree_root(inclusion_list_committee) == message.inclusion_list_committee_root`.
//   - [REJECT] The validator index `message.validator_index` is within the inclusion_list_committee
//     corresponding to `message.inclusion_list_committee_root`.
//   - [REJECT] The length of the transactions `message.transactions` is within the upper bound
//     MAX_TRANSACTIONS_PER_INCLUSION_LIST.
//   - [IGNORE] The message is either the first or second valid message received from the validator with
//     index `message.validator_index`.
//   - [REJECT] The signature `inclusion_list.signature` is valid with respect to the validator index.
|
||||
func (s *Service) validateInclusionList(ctx context.Context, id peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
// Skip self-published messages.
|
||||
if id == s.cfg.p2p.PeerID() {
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
// Ignore if the node is currently syncing.
|
||||
if s.cfg.initialSync.Syncing() {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// Validate topic presence.
|
||||
if msg.Topic == nil {
|
||||
return pubsub.ValidationReject, errInvalidTopic
|
||||
}
|
||||
|
||||
// Decode the pubsub message into the appropriate type.
|
||||
m, err := s.decodePubsubMessage(msg)
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
il, ok := m.(*eth.SignedInclusionList)
|
||||
if !ok {
|
||||
return pubsub.ValidationReject, errWrongMessage
|
||||
}
|
||||
|
||||
// Check for nil inclusion list.
|
||||
if err := helpers.ValidateNilSignedInclusionList(il); err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// Validate slot constraints.
|
||||
currentSlot := s.cfg.clock.CurrentSlot()
|
||||
if il.Message.Slot != currentSlot && il.Message.Slot+1 != currentSlot {
|
||||
return pubsub.ValidationReject, errors.Errorf("slot %d is not equal to the previous or current slot %d", il.Message.Slot, currentSlot)
|
||||
}
|
||||
secondsSinceSlotStart, err := slots.SinceSlotStart(currentSlot, s.cfg.chain.GenesisTime(), prysmTime.Now())
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
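// The deadline is one interval into the slot (SecondsPerSlot / IntervalsPerSlot), i.e. the attestation deadline referenced in the rules above.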
deadline := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
|
||||
if il.Message.Slot+1 == currentSlot && uint64(secondsSinceSlotStart.Seconds()) > deadline {
|
||||
return pubsub.ValidationIgnore, errors.New("slot is equal to the previous slot and the current time is more than attestation_deadline seconds into the slot")
|
||||
}
|
||||
|
||||
// Fetch the current head state.
|
||||
st, err := s.cfg.chain.HeadState(ctx)
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// Validate inclusion list committee root.
|
||||
committee, err := helpers.GetInclusionListCommittee(ctx, st, il.Message.Slot)
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
root, err := ssz.InclusionListRoot(committee)
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
if root != [32]byte(il.Message.InclusionListCommitteeRoot) {
|
||||
return pubsub.ValidationReject, errors.New("inclusion_list_committee_root does not match the inclusion_list_committee")
|
||||
}
|
||||
|
||||
// Validate validator index is within the committee.
|
||||
var included bool
|
||||
for _, i := range committee {
|
||||
if i == il.Message.ValidatorIndex {
|
||||
included = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !included {
|
||||
return pubsub.ValidationReject, errors.New("validator_index is not within the inclusion_list_committee")
|
||||
}
|
||||
|
||||
// Validate transaction size.
|
||||
totalSize := 0
|
||||
for _, transaction := range il.Message.Transactions {
|
||||
totalSize += len(transaction)
|
||||
}
|
||||
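// The 8 KiB bound below appears to correspond to the spec's MAX_BYTES_PER_INCLUSION_LIST; that constant is not defined in this diff, so treat the mapping as an assumption.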
if totalSize > 8*1024 {
|
||||
return pubsub.ValidationReject, errors.New("total size of transactions exceeds 8KB")
|
||||
}
|
||||
|
||||
// Check for duplicate inclusion list from the validator.
|
||||
if s.inclusionLists.SeenTwice(il.Message.Slot, il.Message.ValidatorIndex) {
|
||||
return pubsub.ValidationReject, errors.New("inclusion list seen twice")
|
||||
}
|
||||
|
||||
// Validate the inclusion list signature.
|
||||
if err := helpers.ValidateInclusionListSignature(ctx, st, il); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
msg.ValidatorData = il
|
||||
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
// subscriberInclusionList handles incoming inclusion list messages by adding them to the local inclusion list cache.
|
||||
func (s *Service) subscriberInclusionList(ctx context.Context, msg proto.Message) error {
|
||||
il, ok := msg.(*eth.SignedInclusionList)
|
||||
if !ok {
|
||||
return fmt.Errorf("message was not type *ethpb.SignedInclusionList, type=%T", msg)
|
||||
}
|
||||
if il == nil {
|
||||
return errors.New("nil inclusion list")
|
||||
}
|
||||
|
||||
startTime, err := slots.StartTime(s.cfg.clock.GenesisTime(), il.Message.Slot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not compute start time for slot %d", il.Message.Slot)
|
||||
}
|
||||
timeNow := time.Now()
|
||||
isBeforeFreezeDeadline := s.cfg.clock.CurrentSlot() == il.Message.Slot &&
|
||||
timeNow.Sub(startTime) < time.Duration(params.BeaconConfig().InclusionListFreezeDeadLine)*time.Second
|
||||
|
||||
slotStartTime := slots.UnsafeStartTime(s.cfg.chain.GenesisTime(), il.Message.Slot)
|
||||
ilTxByteSize := 0
|
||||
for _, transaction := range il.Message.Transactions {
|
||||
ilTxByteSize += len(transaction)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": il.Message.Slot,
|
||||
"committeeRoot": fmt.Sprintf("%#x", il.Message.InclusionListCommitteeRoot),
|
||||
"txCount": len(il.Message.Transactions),
|
||||
"sinceSlotStart": time.Since(slotStartTime),
|
||||
"ilTxByteSize": ilTxByteSize,
|
||||
}).Info("Inclusion list verified and received")
|
||||
|
||||
s.inclusionLists.Add(il.Message.Slot, il.Message.ValidatorIndex, il.Message.Transactions, isBeforeFreezeDeadline)
|
||||
|
||||
s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: opfeed.InclusionListReceived,
|
||||
Data: &opfeed.InclusionListReceivedData{
|
||||
SignedInclusionList: il,
|
||||
},
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -215,14 +215,6 @@ func WithSlasherEnabled(enabled bool) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithInclusionListsCache allows the sync package to access the inclusion lists cache.
|
||||
func WithInclusionListsCache(c *cache.InclusionLists) Option {
|
||||
return func(s *Service) error {
|
||||
s.inclusionLists = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithLightClientStore allows the sync package to access light client data.
|
||||
func WithLightClientStore(lcs *lightClient.Store) Option {
|
||||
return func(s *Service) error {
|
||||
|
||||
@@ -125,10 +125,6 @@ func (s *Service) rpcHandlerByTopicFromEpoch(epoch primitives.Epoch) (map[string
|
||||
// Get the beacon config.
|
||||
beaconConfig := params.BeaconConfig()
|
||||
|
||||
if epoch >= beaconConfig.Eip7805ForkEpoch {
|
||||
return s.rpcHandlerByTopicFromFork(version.Focil)
|
||||
}
|
||||
|
||||
if epoch >= beaconConfig.FuluForkEpoch {
|
||||
return s.rpcHandlerByTopicFromFork(version.Fulu)
|
||||
}
|
||||
|
||||
@@ -76,12 +76,6 @@ func WriteBlockChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, en
|
||||
return err
|
||||
}
|
||||
obtainedCtx = digest[:]
|
||||
case version.Focil:
|
||||
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().Eip7805ForkEpoch, valRoot[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
obtainedCtx = digest[:]
|
||||
default:
|
||||
return errors.Wrapf(ErrUnrecognizedVersion, "block version %d is not recognized", blk.Version())
|
||||
}
|
||||
|
||||
@@ -178,7 +178,6 @@ type Service struct {
|
||||
slasherEnabled bool
|
||||
lcStore *lightClient.Store
|
||||
dataColumnLogCh chan dataColumnLogEntry
|
||||
inclusionLists *cache.InclusionLists
|
||||
}
|
||||
|
||||
// NewService initializes new regular sync service.
|
||||
|
||||
@@ -225,21 +225,6 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
|
||||
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCountElectra)
|
||||
},
|
||||
})
|
||||
s.subscribe( // Hack for kurtosis starting from genesis
|
||||
p2p.InclusionListTopicFormat,
|
||||
s.validateInclusionList,
|
||||
s.subscriberInclusionList,
|
||||
digest,
|
||||
)
|
||||
}
|
||||
|
||||
if params.BeaconConfig().Eip7805ForkEpoch <= epoch {
|
||||
s.subscribe(
|
||||
p2p.InclusionListTopicFormat,
|
||||
s.validateInclusionList,
|
||||
s.subscriberInclusionList,
|
||||
digest,
|
||||
)
|
||||
}
|
||||
|
||||
// New gossip topic in Fulu.
|
||||
|
||||
changelog/tt_fast_confirmation.md (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
### Added
|
||||
|
||||
- Implement fast confirmation algorithm for Prysm.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Fixed
|
||||
|
||||
- FOCIL
|
||||
@@ -333,6 +333,7 @@ var (
|
||||
Usage: "Specifies the retention period for the pruner service in terms of epochs. " +
|
||||
"If this value is less than MIN_EPOCHS_FOR_BLOCK_REQUESTS, it will be ignored.",
|
||||
}
|
||||
|
||||
// SubscribeAllDataSubnets enables subscription to all data subnets.
|
||||
SubscribeAllDataSubnets = &cli.BoolFlag{
|
||||
Name: "subscribe-all-data-subnets",
|
||||
@@ -344,4 +345,18 @@ var (
|
||||
Usage: "Maximum number of signatures to batch verify at once for beacon attestation p2p gossip.",
|
||||
Value: 1000,
|
||||
}
|
||||
|
||||
	FastConfirmationByzantineThreshold = &cli.Uint64Flag{
		Name:    "fast-confirmation-byzantine-threshold",
		Usage:   "Byzantine threshold percentage (0-100) used for fast confirmation.",
		Value:   33,
		Aliases: []string{"fc-threshold"},
	}

	SafeBlock = &cli.StringFlag{
		Name:    "safe-block",
		Usage:   "Algorithm for safe block selection: justified, unrealized-justified, or fast-confirmation.",
		Value:   "unrealized-justified",
		Aliases: []string{"sb"},
	}
)
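To make the relationship between the two new flags concrete, the following is a minimal, hypothetical sketch of copying them into the beacon chain config at startup. Only the flag names and the FastConfirmationByzantineThreshold and SafeBlockAlgorithm config fields appear on this branch; the helper function, its package placement, and the algorithm-name validation are illustrative assumptions, not code from this diff.

package flags // hypothetical placement next to the flag definitions above

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/urfave/cli/v2"
)

// ConfigureSafeBlockSelection copies the safe-block flag values into the beacon
// chain config. Illustrative sketch only.
func ConfigureSafeBlockSelection(cliCtx *cli.Context) error {
	algo := cliCtx.String(SafeBlock.Name)
	switch algo {
	case "justified", "unrealized-justified", "fast-confirmation":
	default:
		return fmt.Errorf("unknown safe block algorithm %q", algo)
	}
	cfg := params.BeaconConfig().Copy()
	cfg.SafeBlockAlgorithm = algo
	cfg.FastConfirmationByzantineThreshold = cliCtx.Uint64(FastConfirmationByzantineThreshold.Name)
	params.OverrideBeaconConfig(cfg)
	return nil
}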
|
||||
|
||||
@@ -149,6 +149,8 @@ var appFlags = []cli.Flag{
|
||||
bflags.BackfillWorkerCount,
|
||||
bflags.BackfillOldestSlot,
|
||||
flags.BatchVerifierLimit,
|
||||
flags.FastConfirmationByzantineThreshold,
|
||||
flags.SafeBlock,
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -163,6 +163,8 @@ var appHelpFlagGroups = []flagGroup{
|
||||
flags.ExecutionJWTSecretFlag,
|
||||
flags.JwtId,
|
||||
flags.InteropMockEth1DataVotesFlag,
|
||||
flags.FastConfirmationByzantineThreshold,
|
||||
flags.SafeBlock,
|
||||
},
|
||||
},
|
||||
{ // Flags relevant to configuring beacon chain monitoring.
|
||||
|
||||
@@ -127,7 +127,6 @@ type BeaconChainConfig struct {
|
||||
DomainApplicationMask [4]byte `yaml:"DOMAIN_APPLICATION_MASK" spec:"true"` // DomainApplicationMask defines the BLS signature domain for application mask.
|
||||
DomainApplicationBuilder [4]byte `yaml:"DOMAIN_APPLICATION_BUILDER" spec:"true"` // DomainApplicationBuilder defines the BLS signature domain for application builder.
|
||||
DomainBLSToExecutionChange [4]byte `yaml:"DOMAIN_BLS_TO_EXECUTION_CHANGE" spec:"true"` // DomainBLSToExecutionChange defines the BLS signature domain to change withdrawal addresses to ETH1 prefix
|
||||
DomainInclusionListCommittee [4]byte `yaml:"DOMAIN_INCLUSION_LIST_COMMITTEE" spec:"true"` // DomainInclusionListCommittee defines the BLS signature domain for inclusion list committee root.
|
||||
|
||||
// Prysm constants.
|
||||
GenesisValidatorsRoot [32]byte // GenesisValidatorsRoot is the root hash of the genesis validators.
|
||||
@@ -172,8 +171,6 @@ type BeaconChainConfig struct {
|
||||
ElectraForkEpoch primitives.Epoch `yaml:"ELECTRA_FORK_EPOCH" spec:"true"` // ElectraForkEpoch is used to represent the assigned fork epoch for electra.
|
||||
FuluForkVersion []byte `yaml:"FULU_FORK_VERSION" spec:"true"` // FuluForkVersion is used to represent the fork version for fulu.
|
||||
FuluForkEpoch primitives.Epoch `yaml:"FULU_FORK_EPOCH" spec:"true"` // FuluForkEpoch is used to represent the assigned fork epoch for fulu.
|
||||
Eip7805ForkVersion []byte           `yaml:"EIP7805_FORK_VERSION" spec:"true"` // Eip7805ForkVersion is used to represent the fork version for eip7805.
Eip7805ForkEpoch   primitives.Epoch `yaml:"EIP7805_FORK_EPOCH" spec:"true"`   // Eip7805ForkEpoch is used to represent the assigned fork epoch for eip7805.
|
||||
|
||||
ForkVersionSchedule map[[fieldparams.VersionLength]byte]primitives.Epoch // Schedule of fork epochs by version.
|
||||
ForkVersionNames map[[fieldparams.VersionLength]byte]string // Human-readable names of fork versions.
|
||||
@@ -318,8 +315,9 @@ type BeaconChainConfig struct {
|
||||
// Deprecated: This field is no longer supported. Avoid using it.
|
||||
DeprecatedMaxBlobsPerBlockFulu int `yaml:"MAX_BLOBS_PER_BLOCK_FULU" spec:"true"`
|
||||
|
||||
InclusionListCommitteeSize uint64
|
||||
InclusionListFreezeDeadLine uint64
|
||||
// Safe block configuration values.
FastConfirmationByzantineThreshold uint64 // FastConfirmationByzantineThreshold is the Byzantine threshold percentage (0-100) used for fast confirmation.
SafeBlockAlgorithm                 string // SafeBlockAlgorithm is the algorithm used for safe block selection.
|
||||
}
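For intuition about how FastConfirmationByzantineThreshold could feed a confirmation check, here is a sketch of one textbook one-shot condition: a block counts as fast-confirmed once its attesting support exceeds one half of the considered weight plus the assumed Byzantine fraction. The function, its parameters, and the exact inequality are assumptions for illustration and may differ from the algorithm implemented on this branch.

// isFastConfirmedSketch is illustrative only. With byzantineThresholdPct = 33 it
// requires support to exceed 83% of the attesting weight considered; the value
// for byzantineThresholdPct would come from FastConfirmationByzantineThreshold.
func isFastConfirmedSketch(supportGwei, totalWeightGwei, byzantineThresholdPct uint64) bool {
	// support/total > 1/2 + threshold/100, rearranged to stay in integer arithmetic
	// (assumes the products fit in uint64 for realistic Gwei weights).
	return 100*supportGwei > (50+byzantineThresholdPct)*totalWeightGwei
}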
|
||||
|
||||
func (b *BeaconChainConfig) VersionToForkEpochMap() map[int]primitives.Epoch {
|
||||
@@ -385,7 +383,6 @@ func ConfigForkVersions(b *BeaconChainConfig) map[[fieldparams.VersionLength]byt
|
||||
bytesutil.ToBytes4(b.DenebForkVersion): version.Deneb,
|
||||
bytesutil.ToBytes4(b.ElectraForkVersion): version.Electra,
|
||||
bytesutil.ToBytes4(b.FuluForkVersion): version.Fulu,
|
||||
bytesutil.ToBytes4(b.Eip7805ForkVersion): version.Focil,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -34,6 +34,8 @@ var placeholderFields = []string{
|
||||
"EIP7441_FORK_VERSION",
|
||||
"EIP7732_FORK_EPOCH",
|
||||
"EIP7732_FORK_VERSION",
|
||||
"EIP7805_FORK_EPOCH",
|
||||
"EIP7805_FORK_VERSION",
|
||||
"EPOCHS_PER_SHUFFLING_PHASE",
|
||||
"MAX_BYTES_PER_INCLUSION_LIST",
|
||||
"MAX_REQUEST_BLOB_SIDECARS_FULU",
|
||||
|
||||
@@ -222,8 +222,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
ElectraForkEpoch: mainnetElectraForkEpoch,
|
||||
FuluForkVersion: []byte{6, 0, 0, 0},
|
||||
FuluForkEpoch: mainnetFuluForkEpoch,
|
||||
Eip7805ForkEpoch: math.MaxUint64,
|
||||
Eip7805ForkVersion: []byte{10, 0, 0, 0},
|
||||
|
||||
// New values introduced in Altair hard fork 1.
|
||||
// Participation flag indices.
|
||||
@@ -341,8 +339,8 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
AttestationSubnetPrefixBits: 6,
|
||||
SubnetsPerNode: 2,
|
||||
NodeIdBits: 256,
|
||||
InclusionListCommitteeSize: 16,
|
||||
InclusionListFreezeDeadLine: 8,
|
||||
|
||||
BlobSchedule: []BlobScheduleEntry{},
|
||||
}
|
||||
|
||||
// MainnetTestConfig provides a version of the mainnet config that has a different name
|
||||
|
||||
@@ -97,8 +97,6 @@ func MinimalSpecConfig() *BeaconChainConfig {
|
||||
minimalConfig.ElectraForkEpoch = math.MaxUint64
|
||||
minimalConfig.FuluForkVersion = []byte{6, 0, 0, 1}
|
||||
minimalConfig.FuluForkEpoch = math.MaxUint64
|
||||
minimalConfig.Eip7805ForkVersion = []byte{10, 0, 0, 1}
|
||||
minimalConfig.Eip7805ForkEpoch = math.MaxUint64
|
||||
|
||||
minimalConfig.SyncCommitteeSize = 32
|
||||
minimalConfig.InactivityScoreBias = 4
|
||||
|
||||
@@ -713,14 +713,6 @@ func (b *BeaconBlock) Version() int {
|
||||
return b.version
|
||||
}
|
||||
|
||||
func (b *BeaconBlock) NotSatisfyingInclusionList() bool {
|
||||
return b.notSatisfyingInclusionList
|
||||
}
|
||||
|
||||
func (b *BeaconBlock) MarkInclusionListNotSatisfied() {
|
||||
b.notSatisfyingInclusionList = true
|
||||
}
|
||||
|
||||
// HashTreeRoot returns the ssz root of the block.
|
||||
func (b *BeaconBlock) HashTreeRoot() ([field_params.RootLength]byte, error) {
|
||||
pb, err := b.Proto()
|
||||
|
||||
@@ -63,13 +63,12 @@ var _ interfaces.ReadOnlyBeaconBlockBody = &BeaconBlockBody{}
|
||||
|
||||
// BeaconBlock is the main beacon block structure. It can represent any block type.
|
||||
type BeaconBlock struct {
|
||||
version int
|
||||
slot primitives.Slot
|
||||
proposerIndex primitives.ValidatorIndex
|
||||
parentRoot [field_params.RootLength]byte
|
||||
stateRoot [field_params.RootLength]byte
|
||||
body *BeaconBlockBody
|
||||
notSatisfyingInclusionList bool
|
||||
version int
|
||||
slot primitives.Slot
|
||||
proposerIndex primitives.ValidatorIndex
|
||||
parentRoot [field_params.RootLength]byte
|
||||
stateRoot [field_params.RootLength]byte
|
||||
body *BeaconBlockBody
|
||||
}
|
||||
|
||||
// SignedBeaconBlock is the main signed beacon block structure. It can represent any block type.
|
||||
|
||||
@@ -33,6 +33,7 @@ type Dump struct {
|
||||
FinalizedCheckpoint *eth.Checkpoint
|
||||
UnrealizedJustifiedCheckpoint *eth.Checkpoint
|
||||
UnrealizedFinalizedCheckpoint *eth.Checkpoint
|
||||
SafeHeadRoot []byte
|
||||
ProposerBoostRoot []byte
|
||||
PreviousProposerBoostRoot []byte
|
||||
HeadRoot []byte
|
||||
|
||||
@@ -47,8 +47,6 @@ type ReadOnlyBeaconBlock interface {
|
||||
ssz.HashRoot
|
||||
Version() int
|
||||
AsSignRequestObject() (validatorpb.SignRequestObject, error)
|
||||
NotSatisfyingInclusionList() bool
|
||||
MarkInclusionListNotSatisfied()
|
||||
}
|
||||
|
||||
// ReadOnlyBeaconBlockBody describes the method set employed by an object
|
||||
|
||||
@@ -165,14 +165,6 @@ func (BeaconBlock) SetParentRoot(_ []byte) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (m BeaconBlock) NotSatisfyingInclusionList() bool {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (m BeaconBlock) MarkInclusionListNotSatisfied() {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
type BeaconBlockBody struct{}
|
||||
|
||||
func (BeaconBlockBody) RandaoReveal() [field_params.BLSSignatureLength]byte {
|
||||
|
||||
@@ -13,8 +13,6 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/hash/htr:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
|
||||
@@ -90,8 +90,6 @@ func FromForkVersion(cv [fieldparams.VersionLength]byte) (*VersionedUnmarshaler,
|
||||
fork = version.Electra
|
||||
case bytesutil.ToBytes4(cfg.FuluForkVersion):
|
||||
fork = version.Fulu
|
||||
case bytesutil.ToBytes4(cfg.Eip7805ForkVersion):
|
||||
fork = version.Focil
|
||||
default:
|
||||
return nil, errors.Wrapf(ErrForkNotFound, "version=%#x", cv)
|
||||
}
|
||||
@@ -167,7 +165,7 @@ func (cf *VersionedUnmarshaler) UnmarshalBeaconState(marshaled []byte) (s state.
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to init state trie from state, detected fork=%s", forkName)
|
||||
}
|
||||
case version.Fulu, version.Focil:
|
||||
case version.Fulu:
|
||||
st := ðpb.BeaconStateFulu{}
|
||||
err = st.UnmarshalSSZ(marshaled)
|
||||
if err != nil {
|
||||
@@ -227,7 +225,7 @@ func (cf *VersionedUnmarshaler) UnmarshalBeaconBlock(marshaled []byte) (interfac
|
||||
blk = ðpb.SignedBeaconBlockDeneb{}
|
||||
case version.Electra:
|
||||
blk = ðpb.SignedBeaconBlockElectra{}
|
||||
case version.Fulu, version.Focil:
|
||||
case version.Fulu:
|
||||
blk = ðpb.SignedBeaconBlockFulu{}
|
||||
default:
|
||||
forkName := version.String(cf.Fork)
|
||||
@@ -266,7 +264,7 @@ func (cf *VersionedUnmarshaler) UnmarshalBlindedBeaconBlock(marshaled []byte) (i
|
||||
blk = ðpb.SignedBlindedBeaconBlockDeneb{}
|
||||
case version.Electra:
|
||||
blk = ðpb.SignedBlindedBeaconBlockElectra{}
|
||||
case version.Fulu, version.Focil:
|
||||
case version.Fulu:
|
||||
blk = ðpb.SignedBlindedBeaconBlockFulu{}
|
||||
default:
|
||||
forkName := version.String(cf.Fork)
|
||||
|
||||
@@ -5,8 +5,6 @@ import (
|
||||
"encoding/binary"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
@@ -177,21 +175,6 @@ func ByteSliceRoot(slice []byte, maxLength uint64) ([32]byte, error) {
|
||||
return MixInLength(bytesRoot, bytesRootBufRoot), nil
|
||||
}
|
||||
|
||||
// InclusionListRoot computes and returns the hash tree root of a list of inclusion list committee indices.
|
||||
func InclusionListRoot(committee []primitives.ValidatorIndex) ([32]byte, error) {
|
||||
b := make([][]byte, params.BeaconConfig().InclusionListCommitteeSize)
|
||||
for i := 0; i < len(committee) && i < len(b); i++ {
|
||||
buf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(buf, uint64(committee[i]))
|
||||
b[i] = buf
|
||||
}
|
||||
chunks, err := PackByChunk(b)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
return BitwiseMerkleize(chunks, uint64(len(chunks)), uint64(len(chunks)))
|
||||
}
|
||||
|
||||
func withdrawalRoot(w *enginev1.Withdrawal) ([32]byte, error) {
|
||||
fieldRoots := make([][32]byte, 4)
|
||||
if w != nil {
|
||||
|
||||
proto/engine/v1/execution_engine.pb.go (generated, 175 lines)
@@ -26,13 +26,12 @@ const (
|
||||
type PayloadStatus_Status int32
|
||||
|
||||
const (
|
||||
PayloadStatus_UNKNOWN PayloadStatus_Status = 0
|
||||
PayloadStatus_VALID PayloadStatus_Status = 1
|
||||
PayloadStatus_INVALID PayloadStatus_Status = 2
|
||||
PayloadStatus_SYNCING PayloadStatus_Status = 3
|
||||
PayloadStatus_ACCEPTED PayloadStatus_Status = 4
|
||||
PayloadStatus_INVALID_BLOCK_HASH PayloadStatus_Status = 5
|
||||
PayloadStatus_INCLUSION_LIST_NOT_SATISFIED PayloadStatus_Status = 6
|
||||
PayloadStatus_UNKNOWN PayloadStatus_Status = 0
|
||||
PayloadStatus_VALID PayloadStatus_Status = 1
|
||||
PayloadStatus_INVALID PayloadStatus_Status = 2
|
||||
PayloadStatus_SYNCING PayloadStatus_Status = 3
|
||||
PayloadStatus_ACCEPTED PayloadStatus_Status = 4
|
||||
PayloadStatus_INVALID_BLOCK_HASH PayloadStatus_Status = 5
|
||||
)
|
||||
|
||||
// Enum value maps for PayloadStatus_Status.
|
||||
@@ -44,16 +43,14 @@ var (
|
||||
3: "SYNCING",
|
||||
4: "ACCEPTED",
|
||||
5: "INVALID_BLOCK_HASH",
|
||||
6: "INCLUSION_LIST_NOT_SATISFIED",
|
||||
}
|
||||
PayloadStatus_Status_value = map[string]int32{
|
||||
"UNKNOWN": 0,
|
||||
"VALID": 1,
|
||||
"INVALID": 2,
|
||||
"SYNCING": 3,
|
||||
"ACCEPTED": 4,
|
||||
"INVALID_BLOCK_HASH": 5,
|
||||
"INCLUSION_LIST_NOT_SATISFIED": 6,
|
||||
"UNKNOWN": 0,
|
||||
"VALID": 1,
|
||||
"INVALID": 2,
|
||||
"SYNCING": 3,
|
||||
"ACCEPTED": 4,
|
||||
"INVALID_BLOCK_HASH": 5,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -2326,7 +2323,7 @@ var file_proto_engine_v1_execution_engine_proto_rawDesc = []byte{
|
||||
0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f,
|
||||
0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a,
|
||||
0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x15, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x65, 0x61,
|
||||
0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x22, 0xb5, 0x02, 0x0a,
|
||||
0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x22, 0x92, 0x02, 0x0a,
|
||||
0x0d, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x40,
|
||||
0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28,
|
||||
0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65,
|
||||
@@ -2338,83 +2335,81 @@ var file_proto_engine_v1_execution_engine_proto_rawDesc = []byte{
|
||||
0x48, 0x61, 0x73, 0x68, 0x12, 0x29, 0x0a, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
|
||||
0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22,
|
||||
0x82, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
|
||||
0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x41, 0x4c, 0x49, 0x44,
|
||||
0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, 0x12,
|
||||
0x0b, 0x0a, 0x07, 0x53, 0x59, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08,
|
||||
0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x49, 0x4e,
|
||||
0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x41, 0x53, 0x48,
|
||||
0x10, 0x05, 0x12, 0x20, 0x0a, 0x1c, 0x49, 0x4e, 0x43, 0x4c, 0x55, 0x53, 0x49, 0x4f, 0x4e, 0x5f,
|
||||
0x4c, 0x49, 0x53, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x41, 0x54, 0x49, 0x53, 0x46, 0x49,
|
||||
0x45, 0x44, 0x10, 0x06, 0x22, 0xab, 0x01, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x6b, 0x63, 0x68, 0x6f,
|
||||
0x69, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2e, 0x0a, 0x0f, 0x68, 0x65, 0x61, 0x64,
|
||||
0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x42,
|
||||
0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2e, 0x0a, 0x0f, 0x73, 0x61, 0x66, 0x65,
|
||||
0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0d, 0x73, 0x61, 0x66, 0x65, 0x42,
|
||||
0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x38, 0x0a, 0x14, 0x66, 0x69, 0x6e, 0x61,
|
||||
0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x12,
|
||||
0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61,
|
||||
0x73, 0x68, 0x22, 0xd5, 0x01, 0x0a, 0x0a, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61,
|
||||
0x6c, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
|
||||
0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x77, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64,
|
||||
0x61, 0x74, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
|
||||
0x42, 0x4e, 0x82, 0xb5, 0x18, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
|
||||
0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73,
|
||||
0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65,
|
||||
0x73, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78,
|
||||
0x52, 0x0e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78,
|
||||
0x12, 0x20, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x32, 0x30, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65,
|
||||
0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01,
|
||||
0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x0b, 0x42,
|
||||
0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x39, 0x0a, 0x0f, 0x6b, 0x7a,
|
||||
0x60, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b,
|
||||
0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10,
|
||||
0x01, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, 0x12, 0x0b,
|
||||
0x0a, 0x07, 0x53, 0x59, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x41,
|
||||
0x43, 0x43, 0x45, 0x50, 0x54, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x49, 0x4e, 0x56,
|
||||
0x41, 0x4c, 0x49, 0x44, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10,
|
||||
0x05, 0x22, 0xab, 0x01, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x6b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65,
|
||||
0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2e, 0x0a, 0x0f, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x62, 0x6c,
|
||||
0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
|
||||
0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x42, 0x6c, 0x6f, 0x63,
|
||||
0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2e, 0x0a, 0x0f, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x62, 0x6c,
|
||||
0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
|
||||
0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0d, 0x73, 0x61, 0x66, 0x65, 0x42, 0x6c, 0x6f, 0x63,
|
||||
0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x38, 0x0a, 0x14, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a,
|
||||
0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20,
|
||||
0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x12, 0x66, 0x69, 0x6e,
|
||||
0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x22,
|
||||
0xd5, 0x01, 0x0a, 0x0a, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x12, 0x14,
|
||||
0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69,
|
||||
0x6e, 0x64, 0x65, 0x78, 0x12, 0x77, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f,
|
||||
0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x42, 0x4e, 0x82,
|
||||
0xb5, 0x18, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66,
|
||||
0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d,
|
||||
0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x56,
|
||||
0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x0e, 0x76,
|
||||
0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a,
|
||||
0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
|
||||
0x8a, 0xb5, 0x18, 0x02, 0x32, 0x30, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12,
|
||||
0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52,
|
||||
0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x62,
|
||||
0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x39, 0x0a, 0x0f, 0x6b, 0x7a, 0x67, 0x5f, 0x63,
|
||||
0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c,
|
||||
0x42, 0x10, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30,
|
||||
0x39, 0x36, 0x52, 0x0e, 0x6b, 0x7a, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e,
|
||||
0x74, 0x73, 0x12, 0x28, 0x0a, 0x06, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x02, 0x20, 0x03,
|
||||
0x28, 0x0c, 0x42, 0x10, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x04,
|
||||
0x34, 0x30, 0x39, 0x36, 0x52, 0x06, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x12, 0x2a, 0x0a, 0x05,
|
||||
0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x14, 0x8a, 0xb5, 0x18,
|
||||
0x08, 0x3f, 0x2c, 0x31, 0x33, 0x31, 0x30, 0x37, 0x32, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39,
|
||||
0x36, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0d, 0x42, 0x6c, 0x6f,
|
||||
0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x56, 0x32, 0x12, 0x39, 0x0a, 0x0f, 0x6b, 0x7a,
|
||||
0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20,
|
||||
0x03, 0x28, 0x0c, 0x42, 0x10, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18,
|
||||
0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x0e, 0x6b, 0x7a, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
|
||||
0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x28, 0x0a, 0x06, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18,
|
||||
0x02, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x10, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92,
|
||||
0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x06, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x12,
|
||||
0x2a, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x14,
|
||||
0x8a, 0xb5, 0x18, 0x08, 0x3f, 0x2c, 0x31, 0x33, 0x31, 0x30, 0x37, 0x32, 0x92, 0xb5, 0x18, 0x04,
|
||||
0x34, 0x30, 0x39, 0x36, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0d,
|
||||
0x42, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x56, 0x32, 0x12, 0x39, 0x0a,
|
||||
0x0f, 0x6b, 0x7a, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73,
|
||||
0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x10, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38,
|
||||
0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x0e, 0x6b, 0x7a, 0x67, 0x43, 0x6f, 0x6d,
|
||||
0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x70, 0x72, 0x6f, 0x6f,
|
||||
0x66, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x12, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c,
|
||||
0x34, 0x38, 0x92, 0xb5, 0x18, 0x06, 0x35, 0x32, 0x34, 0x32, 0x38, 0x38, 0x52, 0x06, 0x70, 0x72,
|
||||
0x6f, 0x6f, 0x66, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x03, 0x20,
|
||||
0x03, 0x28, 0x0c, 0x42, 0x14, 0x8a, 0xb5, 0x18, 0x08, 0x3f, 0x2c, 0x31, 0x33, 0x31, 0x30, 0x37,
|
||||
0x32, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73,
|
||||
0x22, 0x26, 0x0a, 0x04, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x1e, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5, 0x18, 0x06, 0x31, 0x33, 0x31, 0x30,
|
||||
0x37, 0x32, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x53, 0x0a, 0x0c, 0x42, 0x6c, 0x6f, 0x62,
|
||||
0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5, 0x18, 0x06, 0x31, 0x33, 0x31, 0x30,
|
||||
0x37, 0x32, 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x23, 0x0a, 0x09, 0x6b, 0x7a, 0x67, 0x5f,
|
||||
0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18,
|
||||
0x02, 0x34, 0x38, 0x52, 0x08, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x61, 0x0a,
|
||||
0x0e, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x56, 0x32, 0x12,
|
||||
0x1e, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a,
|
||||
0xb5, 0x18, 0x06, 0x31, 0x33, 0x31, 0x30, 0x37, 0x32, 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12,
|
||||
0x2f, 0x0a, 0x0a, 0x6b, 0x7a, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x02, 0x20,
|
||||
0x03, 0x28, 0x0c, 0x42, 0x10, 0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x06, 0x35,
|
||||
0x32, 0x34, 0x32, 0x38, 0x38, 0x52, 0x09, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73,
|
||||
0x42, 0x95, 0x01, 0x0a, 0x16, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75,
|
||||
0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x14, 0x45, 0x78, 0x65,
|
||||
0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x50, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
||||
0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79,
|
||||
0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69,
|
||||
0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31, 0xaa, 0x02,
|
||||
0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65,
|
||||
0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45,
|
||||
0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5c, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18,
|
||||
0x02, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x12, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92,
|
||||
0xb5, 0x18, 0x06, 0x35, 0x32, 0x34, 0x32, 0x38, 0x38, 0x52, 0x06, 0x70, 0x72, 0x6f, 0x6f, 0x66,
|
||||
0x73, 0x12, 0x2a, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c,
|
||||
0x42, 0x14, 0x8a, 0xb5, 0x18, 0x08, 0x3f, 0x2c, 0x31, 0x33, 0x31, 0x30, 0x37, 0x32, 0x92, 0xb5,
|
||||
0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x22, 0x26, 0x0a,
|
||||
0x04, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x1e, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5, 0x18, 0x06, 0x31, 0x33, 0x31, 0x30, 0x37, 0x32, 0x52,
|
||||
0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x53, 0x0a, 0x0c, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x6e, 0x64,
|
||||
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5, 0x18, 0x06, 0x31, 0x33, 0x31, 0x30, 0x37, 0x32, 0x52,
|
||||
0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x23, 0x0a, 0x09, 0x6b, 0x7a, 0x67, 0x5f, 0x70, 0x72, 0x6f,
|
||||
0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38,
|
||||
0x52, 0x08, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x61, 0x0a, 0x0e, 0x42, 0x6c,
|
||||
0x6f, 0x62, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x56, 0x32, 0x12, 0x1e, 0x0a, 0x04,
|
||||
0x62, 0x6c, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5, 0x18, 0x06,
|
||||
0x31, 0x33, 0x31, 0x30, 0x37, 0x32, 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x2f, 0x0a, 0x0a,
|
||||
0x6b, 0x7a, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c,
|
||||
0x42, 0x10, 0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x06, 0x35, 0x32, 0x34, 0x32,
|
||||
0x38, 0x38, 0x52, 0x09, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x42, 0x95, 0x01,
|
||||
0x0a, 0x16, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
|
||||
0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
|
||||
0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66,
|
||||
0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f,
|
||||
0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f,
|
||||
0x76, 0x31, 0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31, 0xaa, 0x02, 0x12, 0x45, 0x74,
|
||||
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x56, 0x31,
|
||||
0xca, 0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x6e, 0x67, 0x69,
|
||||
0x6e, 0x65, 0x5c, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
@@ -28,7 +28,7 @@ message ExecutionPayload {
|
||||
bytes parent_hash = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
bytes fee_recipient = 2 [ (ethereum.eth.ext.ssz_size) = "20" ];
|
||||
bytes state_root = 3 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
bytes receipts_root = 4 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
bytes receipts_root = 4 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
bytes logs_bloom = 5 [ (ethereum.eth.ext.ssz_size) = "logs_bloom.size" ];
|
||||
bytes prev_randao = 6 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
uint64 block_number = 7;
|
||||
@@ -204,7 +204,6 @@ message PayloadStatus {
|
||||
SYNCING = 3;
|
||||
ACCEPTED = 4;
|
||||
INVALID_BLOCK_HASH = 5;
|
||||
INCLUSION_LIST_NOT_SATISFIED = 6;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
proto/engine/v1/fulu.pb.go (generated, 17 lines)
@@ -126,16 +126,17 @@ var file_proto_engine_v1_fulu_proto_rawDesc = []byte{
|
||||
0x64, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x78, 0x65,
|
||||
0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18,
|
||||
0x05, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x11, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x42, 0x8d, 0x01, 0x0a, 0x16, 0x6f, 0x72, 0x67,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x42, 0x8e, 0x01, 0x0a, 0x16, 0x6f, 0x72, 0x67,
|
||||
0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x42, 0x0c, 0x45, 0x6c, 0x65, 0x63, 0x74, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
||||
0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79,
|
||||
0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69,
|
||||
0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31, 0xaa, 0x02,
|
||||
0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65,
|
||||
0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45,
|
||||
0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5c, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
||||
0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
|
||||
0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67,
|
||||
0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31, 0xaa,
|
||||
0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e,
|
||||
0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c,
|
||||
0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5c, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
@@ -5,7 +5,7 @@ package ethereum.engine.v1;
|
||||
import "proto/engine/v1/execution_engine.proto";
|
||||
|
||||
option csharp_namespace = "Ethereum.Engine.V1";
|
||||
option go_package = "github.com/OffchainLabs/prysm/v6/proto/engine/v1;enginev1";
|
||||
option go_package = "github.com/prysmaticlabs/prysm/v5/proto/engine/v1;enginev1";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "ElectraProto";
|
||||
option java_package = "org.ethereum.engine.v1";
|
||||
|
||||
@@ -631,7 +631,7 @@ func TestJsonMarshalUnmarshal(t *testing.T) {
|
||||
BlobGasUsed: 1024,
|
||||
ExcessBlobGas: 2048,
|
||||
}
|
||||
|
||||
|
||||
bundleV2 := &enginev1.BlobsBundleV2{
|
||||
KzgCommitments: [][]byte{make([]byte, 48), make([]byte, 48)},
|
||||
Proofs: [][]byte{make([]byte, 48), make([]byte, 48)},
|
||||
|
||||
@@ -19,7 +19,7 @@ import "proto/eth/ext/options.proto";
|
||||
import "proto/prysm/v1alpha1/beacon_block.proto";
|
||||
|
||||
option csharp_namespace = "Ethereum.Eth.v1alpha1";
|
||||
option go_package = "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1;eth";
|
||||
option go_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1;eth";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "DataColumnsProto";
|
||||
option java_package = "org.ethereum.eth.v1alpha1";
|
||||
|
||||
@@ -25,7 +25,6 @@ proto_library(
|
||||
"powchain.proto",
|
||||
"slasher.proto",
|
||||
"validator.proto",
|
||||
"focil.proto",
|
||||
":ssz_proto_files",
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
@@ -193,8 +192,6 @@ ssz_fulu_objs = [
|
||||
"SignedBeaconBlockContentsFulu",
|
||||
"SignedBeaconBlockFulu",
|
||||
"SignedBlindedBeaconBlockFulu",
|
||||
"InclusionList",
|
||||
"SignedInclusionList",
|
||||
]
|
||||
|
||||
ssz_gen_marshal(
|
||||
|
||||
@@ -941,19 +941,16 @@ message BuilderBidDeneb {
}

message BuilderBidElectra {
  ethereum.engine.v1.ExecutionPayloadHeaderDeneb header = 1;
  repeated bytes blob_kzg_commitments = 2 [
    (ethereum.eth.ext.ssz_size) = "?,48",
    (ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
  ];
  ethereum.engine.v1.ExecutionRequests execution_requests = 3;
  bytes value = 4 [ (ethereum.eth.ext.ssz_size) = "32" ];
  bytes pubkey = 5 [ (ethereum.eth.ext.ssz_size) = "48" ];
  ethereum.engine.v1.ExecutionPayloadHeaderDeneb header = 1;
  repeated bytes blob_kzg_commitments = 2 [(ethereum.eth.ext.ssz_size) = "?,48", (ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"];
  ethereum.engine.v1.ExecutionRequests execution_requests = 3;
  bytes value = 4 [(ethereum.eth.ext.ssz_size) = "32"];
  bytes pubkey = 5 [(ethereum.eth.ext.ssz_size) = "48"];
}

message SignedBuilderBidElectra {
  BuilderBidElectra message = 1;
  bytes signature = 2 [ (ethereum.eth.ext.ssz_size) = "96" ];
  BuilderBidElectra message = 1 ;
  bytes signature = 2 [(ethereum.eth.ext.ssz_size) = "96"];
}

message BlobSidecars {
proto/prysm/v1alpha1/focil.pb.go (generated, 279 lines)
@@ -1,279 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.33.0
// protoc v3.21.7
// source: proto/prysm/v1alpha1/focil.proto

package eth

import (
    reflect "reflect"
    sync "sync"

    github_com_OffchainLabs_prysm_v6_consensus_types_primitives "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    _ "github.com/OffchainLabs/prysm/v6/proto/eth/ext"
    protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)

const (
    // Verify that this generated code is sufficiently up-to-date.
    _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
    // Verify that runtime/protoimpl is sufficiently up-to-date.
    _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type InclusionList struct {
    state protoimpl.MessageState
    sizeCache protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields

    Slot github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot `protobuf:"varint,1,opt,name=slot,proto3" json:"slot,omitempty" cast-type:"github.com/OffchainLabs/prysm/v6/consensus-types/primitives.Slot"`
    ValidatorIndex github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex `protobuf:"varint,2,opt,name=validator_index,json=validatorIndex,proto3" json:"validator_index,omitempty" cast-type:"github.com/OffchainLabs/prysm/v6/consensus-types/primitives.ValidatorIndex"`
    InclusionListCommitteeRoot []byte `protobuf:"bytes,3,opt,name=inclusion_list_committee_root,json=inclusionListCommitteeRoot,proto3" json:"inclusion_list_committee_root,omitempty" ssz-size:"32"`
    Transactions [][]byte `protobuf:"bytes,4,rep,name=transactions,proto3" json:"transactions,omitempty" ssz-max:"1048576,1073741824" ssz-size:"?,?"`
}

func (x *InclusionList) Reset() {
    *x = InclusionList{}
    if protoimpl.UnsafeEnabled {
        mi := &file_proto_prysm_v1alpha1_focil_proto_msgTypes[0]
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        ms.StoreMessageInfo(mi)
    }
}

func (x *InclusionList) String() string {
    return protoimpl.X.MessageStringOf(x)
}

func (*InclusionList) ProtoMessage() {}

func (x *InclusionList) ProtoReflect() protoreflect.Message {
    mi := &file_proto_prysm_v1alpha1_focil_proto_msgTypes[0]
    if protoimpl.UnsafeEnabled && x != nil {
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        if ms.LoadMessageInfo() == nil {
            ms.StoreMessageInfo(mi)
        }
        return ms
    }
    return mi.MessageOf(x)
}

// Deprecated: Use InclusionList.ProtoReflect.Descriptor instead.
func (*InclusionList) Descriptor() ([]byte, []int) {
    return file_proto_prysm_v1alpha1_focil_proto_rawDescGZIP(), []int{0}
}

func (x *InclusionList) GetSlot() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot {
    if x != nil {
        return x.Slot
    }
    return github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot(0)
}

func (x *InclusionList) GetValidatorIndex() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex {
    if x != nil {
        return x.ValidatorIndex
    }
    return github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex(0)
}

func (x *InclusionList) GetInclusionListCommitteeRoot() []byte {
    if x != nil {
        return x.InclusionListCommitteeRoot
    }
    return nil
}

func (x *InclusionList) GetTransactions() [][]byte {
    if x != nil {
        return x.Transactions
    }
    return nil
}

type SignedInclusionList struct {
    state protoimpl.MessageState
    sizeCache protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields

    Message *InclusionList `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
    Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty" ssz-size:"96"`
}

func (x *SignedInclusionList) Reset() {
    *x = SignedInclusionList{}
    if protoimpl.UnsafeEnabled {
        mi := &file_proto_prysm_v1alpha1_focil_proto_msgTypes[1]
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        ms.StoreMessageInfo(mi)
    }
}

func (x *SignedInclusionList) String() string {
    return protoimpl.X.MessageStringOf(x)
}

func (*SignedInclusionList) ProtoMessage() {}

func (x *SignedInclusionList) ProtoReflect() protoreflect.Message {
    mi := &file_proto_prysm_v1alpha1_focil_proto_msgTypes[1]
    if protoimpl.UnsafeEnabled && x != nil {
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        if ms.LoadMessageInfo() == nil {
            ms.StoreMessageInfo(mi)
        }
        return ms
    }
    return mi.MessageOf(x)
}

// Deprecated: Use SignedInclusionList.ProtoReflect.Descriptor instead.
func (*SignedInclusionList) Descriptor() ([]byte, []int) {
    return file_proto_prysm_v1alpha1_focil_proto_rawDescGZIP(), []int{1}
}

func (x *SignedInclusionList) GetMessage() *InclusionList {
    if x != nil {
        return x.Message
    }
    return nil
}

func (x *SignedInclusionList) GetSignature() []byte {
    if x != nil {
        return x.Signature
    }
    return nil
}

var File_proto_prysm_v1alpha1_focil_proto protoreflect.FileDescriptor

var file_proto_prysm_v1alpha1_focil_proto_rawDesc = []byte{
    0x0a, 0x20, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31,
    0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x66, 0x6f, 0x63, 0x69, 0x6c, 0x2e, 0x70, 0x72, 0x6f,
    0x74, 0x6f, 0x12, 0x15, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68,
    0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    0x2f, 0x65, 0x74, 0x68, 0x2f, 0x65, 0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
    0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf0, 0x02, 0x0a, 0x0d, 0x49, 0x6e, 0x63, 0x6c, 0x75,
    0x73, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x58, 0x0a, 0x04, 0x73, 0x6c, 0x6f, 0x74,
    0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44, 0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68,
    0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c,
    0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e,
    0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69,
    0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x04, 0x73, 0x6c,
    0x6f, 0x74, 0x12, 0x77, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x5f,
    0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x42, 0x4e, 0x82, 0xb5, 0x18,
    0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63,
    0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76,
    0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65,
    0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x56, 0x61, 0x6c,
    0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x0e, 0x76, 0x61, 0x6c,
    0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x49, 0x0a, 0x1d, 0x69,
    0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x63, 0x6f,
    0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01,
    0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x1a, 0x69, 0x6e, 0x63, 0x6c,
    0x75, 0x73, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74,
    0x65, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61,
    0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x1d, 0x8a, 0xb5,
    0x18, 0x03, 0x3f, 0x2c, 0x3f, 0x92, 0xb5, 0x18, 0x12, 0x31, 0x30, 0x34, 0x38, 0x35, 0x37, 0x36,
    0x2c, 0x31, 0x30, 0x37, 0x33, 0x37, 0x34, 0x31, 0x38, 0x32, 0x34, 0x52, 0x0c, 0x74, 0x72, 0x61,
    0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x7b, 0x0a, 0x13, 0x53, 0x69, 0x67,
    0x6e, 0x65, 0x64, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74,
    0x12, 0x3e, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
    0x0b, 0x32, 0x24, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68,
    0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73,
    0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
    0x12, 0x24, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
    0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x39, 0x36, 0x52, 0x09, 0x73, 0x69, 0x67,
    0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x94, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65,
    0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c,
    0x70, 0x68, 0x61, 0x31, 0x42, 0x0a, 0x46, 0x4f, 0x43, 0x49, 0x4c, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f,
    0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73,
    0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d,
    0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15,
    0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61,
    0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
    0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70,
    0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
    file_proto_prysm_v1alpha1_focil_proto_rawDescOnce sync.Once
    file_proto_prysm_v1alpha1_focil_proto_rawDescData = file_proto_prysm_v1alpha1_focil_proto_rawDesc
)

func file_proto_prysm_v1alpha1_focil_proto_rawDescGZIP() []byte {
    file_proto_prysm_v1alpha1_focil_proto_rawDescOnce.Do(func() {
        file_proto_prysm_v1alpha1_focil_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_prysm_v1alpha1_focil_proto_rawDescData)
    })
    return file_proto_prysm_v1alpha1_focil_proto_rawDescData
}

var file_proto_prysm_v1alpha1_focil_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_proto_prysm_v1alpha1_focil_proto_goTypes = []interface{}{
    (*InclusionList)(nil), // 0: ethereum.eth.v1alpha1.InclusionList
    (*SignedInclusionList)(nil), // 1: ethereum.eth.v1alpha1.SignedInclusionList
}
var file_proto_prysm_v1alpha1_focil_proto_depIdxs = []int32{
    0, // 0: ethereum.eth.v1alpha1.SignedInclusionList.message:type_name -> ethereum.eth.v1alpha1.InclusionList
    1, // [1:1] is the sub-list for method output_type
    1, // [1:1] is the sub-list for method input_type
    1, // [1:1] is the sub-list for extension type_name
    1, // [1:1] is the sub-list for extension extendee
    0, // [0:1] is the sub-list for field type_name
}

func init() { file_proto_prysm_v1alpha1_focil_proto_init() }
func file_proto_prysm_v1alpha1_focil_proto_init() {
    if File_proto_prysm_v1alpha1_focil_proto != nil {
        return
    }
    if !protoimpl.UnsafeEnabled {
        file_proto_prysm_v1alpha1_focil_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*InclusionList); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_proto_prysm_v1alpha1_focil_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*SignedInclusionList); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
    }
    type x struct{}
    out := protoimpl.TypeBuilder{
        File: protoimpl.DescBuilder{
            GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
            RawDescriptor: file_proto_prysm_v1alpha1_focil_proto_rawDesc,
            NumEnums: 0,
            NumMessages: 2,
            NumExtensions: 0,
            NumServices: 0,
        },
        GoTypes: file_proto_prysm_v1alpha1_focil_proto_goTypes,
        DependencyIndexes: file_proto_prysm_v1alpha1_focil_proto_depIdxs,
        MessageInfos: file_proto_prysm_v1alpha1_focil_proto_msgTypes,
    }.Build()
    File_proto_prysm_v1alpha1_focil_proto = out.File
    file_proto_prysm_v1alpha1_focil_proto_rawDesc = nil
    file_proto_prysm_v1alpha1_focil_proto_goTypes = nil
    file_proto_prysm_v1alpha1_focil_proto_depIdxs = nil
}
@@ -1,34 +0,0 @@
syntax = "proto3";

package ethereum.eth.v1alpha1;

import "proto/eth/ext/options.proto";

option csharp_namespace = "Ethereum.Eth.v1alpha1";
option go_package = "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1;eth";
option java_multiple_files = true;
option java_outer_classname = "FOCILProto";
option java_package = "org.ethereum.eth.v1alpha1";
option php_namespace = "Ethereum\\Eth\\v1alpha1";

message InclusionList {
  uint64 slot = 1 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v6/consensus-types/primitives.Slot"
  ];
  uint64 validator_index = 2
      [ (ethereum.eth.ext.cast_type) =
            "github.com/OffchainLabs/prysm/v6/consensus-types/"
            "primitives.ValidatorIndex" ];
  bytes inclusion_list_committee_root = 3
      [ (ethereum.eth.ext.ssz_size) = "32" ];
  repeated bytes transactions = 4 [
    (ethereum.eth.ext.ssz_size) = "?,?",
    (ethereum.eth.ext.ssz_max) = "1048576,1073741824"
  ];
}

message SignedInclusionList {
  InclusionList message = 1;
  bytes signature = 2 [ (ethereum.eth.ext.ssz_size) = "96" ];
}
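
For orientation only (not part of this diff): a minimal sketch of how the generated Go bindings for these now-removed messages could be exercised, assuming the pre-removal OffchainLabs/prysm/v6 packages referenced above. The field values and the zeroed signature are placeholders; the 32- and 96-byte lengths follow the ssz_size options in focil.proto.

package main

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

func main() {
    // Placeholder values; byte lengths come from the ssz_size options above.
    signed := &eth.SignedInclusionList{
        Message: &eth.InclusionList{
            Slot:                       primitives.Slot(123),
            ValidatorIndex:             primitives.ValidatorIndex(7),
            InclusionListCommitteeRoot: make([]byte, 32),
            Transactions:               [][]byte{{0x01, 0x02, 0x03}},
        },
        Signature: make([]byte, 96), // zeroed placeholder, not a real BLS signature
    }

    // Round-trip through the generated SSZ methods shown later in this diff.
    enc, err := signed.MarshalSSZ()
    if err != nil {
        panic(err)
    }
    decoded := &eth.SignedInclusionList{}
    if err := decoded.UnmarshalSSZ(enc); err != nil {
        panic(err)
    }
    // 159 bytes: 100-byte fixed part + 52-byte fixed message part + 4-byte tx offset + 3 tx bytes.
    fmt.Println(len(enc), decoded.Message.Slot)
}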
@@ -2366,287 +2366,6 @@ func (d *DataColumnsByRootIdentifier) HashTreeRootWith(hh *ssz.Hasher) (err erro
    return
}

// MarshalSSZ ssz marshals the InclusionList object
func (i *InclusionList) MarshalSSZ() ([]byte, error) {
    return ssz.MarshalSSZ(i)
}

// MarshalSSZTo ssz marshals the InclusionList object to a target array
func (i *InclusionList) MarshalSSZTo(buf []byte) (dst []byte, err error) {
    dst = buf
    offset := int(52)

    // Field (0) 'Slot'
    dst = ssz.MarshalUint64(dst, uint64(i.Slot))

    // Field (1) 'ValidatorIndex'
    dst = ssz.MarshalUint64(dst, uint64(i.ValidatorIndex))

    // Field (2) 'InclusionListCommitteeRoot'
    if size := len(i.InclusionListCommitteeRoot); size != 32 {
        err = ssz.ErrBytesLengthFn("--.InclusionListCommitteeRoot", size, 32)
        return
    }
    dst = append(dst, i.InclusionListCommitteeRoot...)

    // Offset (3) 'Transactions'
    dst = ssz.WriteOffset(dst, offset)
    for ii := 0; ii < len(i.Transactions); ii++ {
        offset += 4
        offset += len(i.Transactions[ii])
    }

    // Field (3) 'Transactions'
    if size := len(i.Transactions); size > 1048576 {
        err = ssz.ErrListTooBigFn("--.Transactions", size, 1048576)
        return
    }
    {
        offset = 4 * len(i.Transactions)
        for ii := 0; ii < len(i.Transactions); ii++ {
            dst = ssz.WriteOffset(dst, offset)
            offset += len(i.Transactions[ii])
        }
    }
    for ii := 0; ii < len(i.Transactions); ii++ {
        if size := len(i.Transactions[ii]); size > 1073741824 {
            err = ssz.ErrBytesLengthFn("--.Transactions[ii]", size, 1073741824)
            return
        }
        dst = append(dst, i.Transactions[ii]...)
    }

    return
}

// UnmarshalSSZ ssz unmarshals the InclusionList object
func (i *InclusionList) UnmarshalSSZ(buf []byte) error {
    var err error
    size := uint64(len(buf))
    if size < 52 {
        return ssz.ErrSize
    }

    tail := buf
    var o3 uint64

    // Field (0) 'Slot'
    i.Slot = github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[0:8]))

    // Field (1) 'ValidatorIndex'
    i.ValidatorIndex = github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[8:16]))

    // Field (2) 'InclusionListCommitteeRoot'
    if cap(i.InclusionListCommitteeRoot) == 0 {
        i.InclusionListCommitteeRoot = make([]byte, 0, len(buf[16:48]))
    }
    i.InclusionListCommitteeRoot = append(i.InclusionListCommitteeRoot, buf[16:48]...)

    // Offset (3) 'Transactions'
    if o3 = ssz.ReadOffset(buf[48:52]); o3 > size {
        return ssz.ErrOffset
    }

    if o3 != 52 {
        return ssz.ErrInvalidVariableOffset
    }

    // Field (3) 'Transactions'
    {
        buf = tail[o3:]
        num, err := ssz.DecodeDynamicLength(buf, 1048576)
        if err != nil {
            return err
        }
        i.Transactions = make([][]byte, num)
        err = ssz.UnmarshalDynamic(buf, num, func(indx int, buf []byte) (err error) {
            if len(buf) > 1073741824 {
                return ssz.ErrBytesLength
            }
            if cap(i.Transactions[indx]) == 0 {
                i.Transactions[indx] = make([]byte, 0, len(buf))
            }
            i.Transactions[indx] = append(i.Transactions[indx], buf...)
            return nil
        })
        if err != nil {
            return err
        }
    }
    return err
}

// SizeSSZ returns the ssz encoded size in bytes for the InclusionList object
func (i *InclusionList) SizeSSZ() (size int) {
    size = 52

    // Field (3) 'Transactions'
    for ii := 0; ii < len(i.Transactions); ii++ {
        size += 4
        size += len(i.Transactions[ii])
    }

    return
}

// HashTreeRoot ssz hashes the InclusionList object
func (i *InclusionList) HashTreeRoot() ([32]byte, error) {
    return ssz.HashWithDefaultHasher(i)
}

// HashTreeRootWith ssz hashes the InclusionList object with a hasher
func (i *InclusionList) HashTreeRootWith(hh *ssz.Hasher) (err error) {
    indx := hh.Index()

    // Field (0) 'Slot'
    hh.PutUint64(uint64(i.Slot))

    // Field (1) 'ValidatorIndex'
    hh.PutUint64(uint64(i.ValidatorIndex))

    // Field (2) 'InclusionListCommitteeRoot'
    if size := len(i.InclusionListCommitteeRoot); size != 32 {
        err = ssz.ErrBytesLengthFn("--.InclusionListCommitteeRoot", size, 32)
        return
    }
    hh.PutBytes(i.InclusionListCommitteeRoot)

    // Field (3) 'Transactions'
    {
        subIndx := hh.Index()
        num := uint64(len(i.Transactions))
        if num > 1048576 {
            err = ssz.ErrIncorrectListSize
            return
        }
        for _, elem := range i.Transactions {
            {
                elemIndx := hh.Index()
                byteLen := uint64(len(elem))
                if byteLen > 1073741824 {
                    err = ssz.ErrIncorrectListSize
                    return
                }
                hh.AppendBytes32(elem)
                hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
            }
        }
        hh.MerkleizeWithMixin(subIndx, num, 1048576)
    }

    hh.Merkleize(indx)
    return
}

// MarshalSSZ ssz marshals the SignedInclusionList object
func (s *SignedInclusionList) MarshalSSZ() ([]byte, error) {
    return ssz.MarshalSSZ(s)
}

// MarshalSSZTo ssz marshals the SignedInclusionList object to a target array
func (s *SignedInclusionList) MarshalSSZTo(buf []byte) (dst []byte, err error) {
    dst = buf
    offset := int(100)

    // Offset (0) 'Message'
    dst = ssz.WriteOffset(dst, offset)
    if s.Message == nil {
        s.Message = new(InclusionList)
    }
    offset += s.Message.SizeSSZ()

    // Field (1) 'Signature'
    if size := len(s.Signature); size != 96 {
        err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
        return
    }
    dst = append(dst, s.Signature...)

    // Field (0) 'Message'
    if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
        return
    }

    return
}

// UnmarshalSSZ ssz unmarshals the SignedInclusionList object
func (s *SignedInclusionList) UnmarshalSSZ(buf []byte) error {
    var err error
    size := uint64(len(buf))
    if size < 100 {
        return ssz.ErrSize
    }

    tail := buf
    var o0 uint64

    // Offset (0) 'Message'
    if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
        return ssz.ErrOffset
    }

    if o0 != 100 {
        return ssz.ErrInvalidVariableOffset
    }

    // Field (1) 'Signature'
    if cap(s.Signature) == 0 {
        s.Signature = make([]byte, 0, len(buf[4:100]))
    }
    s.Signature = append(s.Signature, buf[4:100]...)

    // Field (0) 'Message'
    {
        buf = tail[o0:]
        if s.Message == nil {
            s.Message = new(InclusionList)
        }
        if err = s.Message.UnmarshalSSZ(buf); err != nil {
            return err
        }
    }
    return err
}

// SizeSSZ returns the ssz encoded size in bytes for the SignedInclusionList object
func (s *SignedInclusionList) SizeSSZ() (size int) {
    size = 100

    // Field (0) 'Message'
    if s.Message == nil {
        s.Message = new(InclusionList)
    }
    size += s.Message.SizeSSZ()

    return
}

// HashTreeRoot ssz hashes the SignedInclusionList object
func (s *SignedInclusionList) HashTreeRoot() ([32]byte, error) {
    return ssz.HashWithDefaultHasher(s)
}

// HashTreeRootWith ssz hashes the SignedInclusionList object with a hasher
func (s *SignedInclusionList) HashTreeRootWith(hh *ssz.Hasher) (err error) {
    indx := hh.Index()

    // Field (0) 'Message'
    if err = s.Message.HashTreeRootWith(hh); err != nil {
        return
    }

    // Field (1) 'Signature'
    if size := len(s.Signature); size != 96 {
        err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
        return
    }
    hh.PutBytes(s.Signature)

    hh.Merkleize(indx)
    return
}
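
A quick sanity check on the generated sizing above (again a sketch, not part of the diff, and assuming the pre-removal types): the fixed SSZ part of InclusionList is 8 (slot) + 8 (validator index) + 32 (committee root) + 4 (transactions offset) = 52 bytes, and SignedInclusionList's is 4 (message offset) + 96 (signature) = 100 bytes, which is where the offset := int(52) and offset := int(100) constants in the MarshalSSZTo methods come from.

package main

import (
    "fmt"

    eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

func main() {
    // Sizes implied by the generated SizeSSZ methods above.
    fmt.Println((&eth.InclusionList{}).SizeSSZ())       // 52: fixed part only, no transactions
    fmt.Println((&eth.SignedInclusionList{}).SizeSSZ()) // 152: 100 fixed + 52 for the empty nested message
}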

// MarshalSSZ ssz marshals the StatusV2 object
func (s *StatusV2) MarshalSSZ() ([]byte, error) {
    return ssz.MarshalSSZ(s)
Some files were not shown because too many files have changed in this diff.