Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: delet-eth1...fixPrivate (57 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 1d081396ea | |
| | 303e7c2d8c | |
| | b616a7f537 | |
| | c51bedb842 | |
| | 55e436eceb | |
| | a6496b9408 | |
| | 3b62c7bb4a | |
| | c109f28bdc | |
| | 3f6e9c8420 | |
| | 54f2d91ef0 | |
| | b692050079 | |
| | ac7e5b6618 | |
| | e82a2488ee | |
| | ce1e6be98d | |
| | 463d45970b | |
| | fbb07226f6 | |
| | f10805fe4b | |
| | 812dbc9eb9 | |
| | de7e5deca5 | |
| | 8b40a6a591 | |
| | c49863b8c3 | |
| | 23b8bb0802 | |
| | 1aca805036 | |
| | 559d7eae0f | |
| | ba74366801 | |
| | 63bd276c48 | |
| | 0cd2786153 | |
| | 32b2991dd1 | |
| | 5cfa5f3950 | |
| | e7672c7801 | |
| | 67c648bcad | |
| | 76cf2c3e0e | |
| | b753b11356 | |
| | acd93daf00 | |
| | 57c9d59996 | |
| | 7a4ecb6060 | |
| | 82f0ea5b11 | |
| | 6fddd13cb2 | |
| | 43c7659d18 | |
| | 2d15e53dab | |
| | 2f2152e039 | |
| | 2542189efc | |
| | 8e6d39a44b | |
| | c35889d4c6 | |
| | 10dedd5ced | |
| | d2966a4c5b | |
| | 62b5c43d87 | |
| | b04baa93cd | |
| | 2e84208169 | |
| | 2265af58ae | |
| | 4d190c41cc | |
| | 0fbb27d8e3 | |
| | 3df3e84270 | |
| | 30cc23c5de | |
| | 9befb6bd06 | |
| | 8a12b78684 | |
| | 46168607e8 | |
1 .bazelrc
@@ -22,6 +22,7 @@ coverage --define=coverage_enabled=1
build --workspace_status_command=./hack/workspace_status.sh
build --define blst_disabled=false
build --compilation_mode=opt
run --define blst_disabled=false
build:blst_disabled --define blst_disabled=true
@@ -36,19 +36,19 @@ func (n *NodeHealthTracker) IsHealthy() bool {
}
func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
n.RLock()
n.Lock()
defer n.Unlock()
newStatus := n.node.IsHealthy(ctx)
if n.isHealthy == nil {
n.isHealthy = &newStatus
}
isStatusChanged := newStatus != *n.isHealthy
n.RUnlock()
isStatusChanged := newStatus != *n.isHealthy
if isStatusChanged {
n.Lock()
// Double-check the condition to ensure it hasn't changed since the first check.
// Update the health status
n.isHealthy = &newStatus
n.Unlock() // It's better to unlock as soon as the protected section is over.
// Send the new status to the health channel
n.healthChan <- newStatus
}
return newStatus
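The hunk above replaces the read-lock/upgrade dance in CheckHealth with a single write lock held across the check and the update. Below is a minimal, self-contained sketch of that pattern; the names (healthTracker, checker, healthChan) are illustrative, not Prysm's actual API.

```go
package health

import (
	"context"
	"sync"
)

type checker interface {
	IsHealthy(ctx context.Context) bool
}

type healthTracker struct {
	sync.Mutex
	node       checker
	isHealthy  *bool
	healthChan chan bool
}

// CheckHealth refreshes the cached status under one write lock and
// notifies listeners only when the status actually changed.
func (t *healthTracker) CheckHealth(ctx context.Context) bool {
	t.Lock()
	defer t.Unlock()

	newStatus := t.node.IsHealthy(ctx)
	if t.isHealthy == nil {
		t.isHealthy = &newStatus
	}
	changed := newStatus != *t.isHealthy
	t.isHealthy = &newStatus
	if changed {
		t.healthChan <- newStatus // assumes a buffered channel or an active reader
	}
	return newStatus
}
```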
@@ -99,9 +99,9 @@ func TestNodeHealth_Concurrency(t *testing.T) {
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
client.EXPECT().IsHealthy(gomock.Any()).Return(false)
client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1)
n.CheckHealth(context.Background())
client.EXPECT().IsHealthy(gomock.Any()).Return(true)
client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
n.CheckHealth(context.Background())
}()
}
@@ -6,6 +6,7 @@ import (
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
@@ -22,7 +23,7 @@ type SignedBid interface {
type Bid interface {
Header() (interfaces.ExecutionData, error)
BlobKzgCommitments() ([][]byte, error)
Value() []byte
Value() primitives.Wei
Pubkey() []byte
Version() int
IsNil() bool
@@ -125,8 +126,8 @@ func (b builderBid) Version() int {
}
// Value --
func (b builderBid) Value() []byte {
return b.p.Value
func (b builderBid) Value() primitives.Wei {
return primitives.LittleEndianBytesToWei(b.p.Value)
}
// Pubkey --
@@ -165,7 +166,7 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header)
}
// BlobKzgCommitments --
@@ -179,8 +180,8 @@ func (b builderBidCapella) Version() int {
}
// Value --
func (b builderBidCapella) Value() []byte {
return b.p.Value
func (b builderBidCapella) Value() primitives.Wei {
return primitives.LittleEndianBytesToWei(b.p.Value)
}
// Pubkey --
@@ -222,8 +223,8 @@ func (b builderBidDeneb) Version() int {
}
// Value --
func (b builderBidDeneb) Value() []byte {
return b.p.Value
func (b builderBidDeneb) Value() primitives.Wei {
return primitives.LittleEndianBytesToWei(b.p.Value)
}
// Pubkey --
@@ -249,7 +250,7 @@ func (b builderBidDeneb) HashTreeRootWith(hh *ssz.Hasher) error {
// Header --
func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header)
}
// BlobKzgCommitments --
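The Bid interface now exposes the bid value as a Wei quantity decoded from the little-endian bytes carried in the protobuf, instead of returning the raw byte slice. A small sketch of that decoding is below; littleEndianBytesToWei is a stand-in helper, not Prysm's primitives API.

```go
package main

import (
	"fmt"
	"math/big"
)

// littleEndianBytesToWei interprets b as a little-endian unsigned integer,
// the byte order used for the SSZ-encoded bid value.
func littleEndianBytesToWei(b []byte) *big.Int {
	rev := make([]byte, len(b))
	for i := range b {
		rev[i] = b[len(b)-1-i] // big.Int.SetBytes expects big-endian
	}
	return new(big.Int).SetBytes(rev)
}

func main() {
	// 0x0de0b6b3a7640000 = 10^18 (1 ETH in wei), laid out little-endian.
	le := []byte{0x00, 0x00, 0x64, 0xa7, 0xb3, 0xb6, 0xe0, 0x0d}
	fmt.Println(littleEndianBytesToWei(le)) // 1000000000000000000
}
```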
@@ -330,7 +330,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, err
}
ed, err := blocks.NewWrappedExecutionData(pb, nil)
ed, err := blocks.NewWrappedExecutionData(pb)
if err != nil {
return nil, nil, err
}
@@ -6,7 +6,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
@@ -16,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
@@ -198,12 +198,12 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedTxRoot, withdrawalsRoot))
|
||||
require.Equal(t, uint64(1), bidHeader.GasUsed())
|
||||
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
// this matches the value in the testExampleHeaderResponse
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
|
||||
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||
require.DeepEqual(t, bidValue, value.Bytes())
|
||||
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
})
|
||||
t.Run("capella", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
@@ -230,12 +230,11 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
|
||||
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||
require.DeepEqual(t, bidValue, value.Bytes())
|
||||
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
})
|
||||
t.Run("deneb", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
@@ -262,12 +261,13 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
|
||||
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||
require.DeepEqual(t, bidValue, value.Bytes())
|
||||
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
|
||||
kcgCommitments, err := bid.BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(kcgCommitments) > 0, true)
|
||||
|
||||
@@ -156,6 +156,8 @@ func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
}
return &eth.BuilderBid{
Header: header,
// Note that SSZBytes() reverses byte order for the little-endian representation.
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
Value: bb.Value.SSZBytes(),
Pubkey: bb.Pubkey,
}, nil
@@ -484,6 +486,8 @@ func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
}
return &eth.BuilderBidCapella{
Header: header,
// Note that SSZBytes() reverses byte order for the little-endian representation.
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
}, nil
@@ -1022,8 +1026,10 @@ func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
return &eth.BuilderBidDeneb{
Header: header,
BlobKzgCommitments: kzgCommitments,
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
// Note that SSZBytes() reverses byte order for the little-endian representation.
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
}, nil
}
@@ -4,6 +4,6 @@ const (
WebUrlPrefix = "/v2/validator/"
WebApiUrlPrefix = "/api/v2/validator/"
KeymanagerApiPrefix = "/eth/v1"
AuthTokenFileName = "auth-token"
SystemLogsPrefix = "health/logs"
AuthTokenFileName = "auth-token"
)
@@ -26,6 +26,7 @@ go_library(
"receive_attestation.go",
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
"service.go",
"tracked_proposer.go",
"weak_subjectivity_checks.go",
@@ -48,6 +49,7 @@ go_library(
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -67,6 +69,7 @@ go_library(
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -157,6 +160,7 @@ go_test(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -33,6 +33,7 @@ var (
)
var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")
// An invalid block is the block that fails state transition based on the core protocol rules.
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
@@ -13,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
@@ -313,7 +312,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []interfaces.Attestation{
wantAtts := []ethpb.Att{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
@@ -390,7 +389,7 @@ func TestSaveOrphanedOps(t *testing.T) {
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []interfaces.Attestation{
wantAtts := []ethpb.Att{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
@@ -518,7 +517,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []interfaces.Attestation{
wantAtts := []ethpb.Att{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
@@ -12,6 +12,8 @@ go_library(
deps = [
"//consensus-types/blocks:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
@@ -5,6 +5,8 @@ import (
"encoding/json"
GoKZG "github.com/crate-crypto/go-kzg-4844"
CKZG "github.com/ethereum/c-kzg-4844/bindings/go"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
)
@@ -12,17 +14,38 @@ var (
//go:embed trusted_setup.json
embeddedTrustedSetup []byte // 1.2Mb
kzgContext *GoKZG.Context
kzgLoaded bool
)
func Start() error {
parsedSetup := GoKZG.JSONTrustedSetup{}
err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
parsedSetup := &GoKZG.JSONTrustedSetup{}
err := json.Unmarshal(embeddedTrustedSetup, parsedSetup)
if err != nil {
return errors.Wrap(err, "could not parse trusted setup JSON")
}
kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
kzgContext, err = GoKZG.NewContext4096(parsedSetup)
if err != nil {
return errors.Wrap(err, "could not initialize go-kzg context")
}
g1Lagrange := &parsedSetup.SetupG1Lagrange
// Length of a G1 point, converted from hex to binary.
g1s := make([]byte, len(g1Lagrange)*(len(g1Lagrange[0])-2)/2)
for i, g1 := range g1Lagrange {
copy(g1s[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
}
// Length of a G2 point, converted from hex to binary.
g2s := make([]byte, len(parsedSetup.SetupG2)*(len(parsedSetup.SetupG2[0])-2)/2)
for i, g2 := range parsedSetup.SetupG2 {
copy(g2s[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
}
if !kzgLoaded {
// Free the current trusted setup before running this method. CKZG
// panics if the same setup is run multiple times.
if err = CKZG.LoadTrustedSetup(g1s, g2s); err != nil {
panic(err)
}
}
kzgLoaded = true
return nil
}
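The (len-2)/2 arithmetic above flattens hex-encoded G1/G2 points ("0x"-prefixed strings of equal length) into one contiguous byte slice for the C-KZG bindings: the two-character "0x" prefix is dropped and the hex length is halved. A standalone sketch of the same flattening using only the standard library (flattenHexPoints is a made-up name, not part of the Prysm or KZG APIs):

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// flattenHexPoints packs "0x"-prefixed hex strings of equal length into one
// contiguous byte slice, mirroring the (len-2)/2 sizing used in Start().
func flattenHexPoints(points []string) ([]byte, error) {
	if len(points) == 0 {
		return nil, nil
	}
	pointLen := (len(points[0]) - 2) / 2 // bytes per point
	out := make([]byte, len(points)*pointLen)
	for i, p := range points {
		b, err := hex.DecodeString(p[2:]) // strip the "0x" prefix
		if err != nil {
			return nil, err
		}
		copy(out[i*pointLen:], b)
	}
	return out, nil
}

func main() {
	flat, _ := flattenHexPoints([]string{"0x0102", "0x0304"})
	fmt.Printf("%x\n", flat) // 01020304
}
```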
@@ -118,9 +118,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
}
// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
func WithP2PBroadcaster(p p2p.Acceser) Option {
return func(s *Service) error {
s.cfg.P2p = p
s.cfg.P2P = p
return nil
}
}
@@ -6,7 +6,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
@@ -37,7 +36,7 @@ import (
//
// # Update latest messages for attesting indices
// update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
func (s *Service) OnAttestation(ctx context.Context, a interfaces.Attestation, disparity time.Duration) error {
func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time.Duration) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
defer span.End()
@@ -81,11 +80,11 @@ func (s *Service) OnAttestation(ctx context.Context, a interfaces.Attestation, d
}
// Use the target state to verify attesting indices are valid.
committee, err := helpers.BeaconCommitteeFromState(ctx, baseState, a.GetData().Slot, a.GetData().CommitteeIndex)
committees, err := helpers.AttestationCommittees(ctx, baseState, a)
if err != nil {
return err
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committee)
indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committees...)
if err != nil {
return err
}
@@ -7,10 +7,11 @@ import (
"time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
@@ -74,7 +75,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
tests := []struct {
name string
a interfaces.Attestation
a ethpb.Att
wantedErr string
}{
{
@@ -126,25 +127,36 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
}
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
eval := func(ctx context.Context, service *Service, genesisState state.BeaconState, pks []bls.SecretKey) {
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(att[0].GetData().Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}
genesisState, pks := util.DeterministicGenesisState(t, 64)
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
t.Run("pre-Electra", func(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
genesisState, pks := util.DeterministicGenesisState(t, 64)
eval(ctx, service, genesisState, pks)
})
t.Run("post-Electra", func(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
genesisState, pks := util.DeterministicGenesisStateElectra(t, 64)
eval(ctx, service, genesisState, pks)
})
}
func TestService_GetRecentPreState(t *testing.T) {
@@ -6,6 +6,8 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -366,11 +368,11 @@ func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot,
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.GetData().Slot, a.GetData().CommitteeIndex)
committees, err := helpers.AttestationCommittees(ctx, st, a)
if err != nil {
return err
}
indices, err := attestation.AttestingIndices(a, committee)
indices, err := attestation.AttestingIndices(a, committees...)
if err != nil {
return err
}
@@ -387,7 +389,7 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
// This function requires a write lock on forkchoice.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []interfaces.AttesterSlashing) {
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []ethpb.AttSlashing) {
for _, slashing := range slashings {
indices := blocks.SlashableAttesterIndices(slashing)
for _, index := range indices {
@@ -500,7 +502,7 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
}
indices, err := bs.Indices(root)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "indices")
}
missing := make(map[uint64]struct{}, len(expected))
for i := range expected {
@@ -514,12 +516,35 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
return missing, nil
}
func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
if len(expected) == 0 {
return nil, nil
}
if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
return nil, errMaxDataColumnsExceeded
}
indices, err := bs.ColumnIndices(root)
if err != nil {
return nil, err
}
missing := make(map[uint64]bool, len(expected))
for col := range expected {
if !indices[col] {
missing[col] = true
}
}
return missing, nil
}
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
if features.Get().EnablePeerDAS {
return s.isDataAvailableDataColumns(ctx, root, signed)
}
if signed.Version() < version.Deneb {
return nil
}
@@ -549,7 +574,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
// get a map of BlobSidecar indices that are not currently available.
missing, err := missingIndices(s.blobStorage, root, kzgCommitments)
if err != nil {
return err
return errors.Wrap(err, "missing indices")
}
// If there are no missing indices, all BlobSidecars are available.
if len(missing) == 0 {
@@ -568,8 +593,13 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
if len(missing) == 0 {
return
}
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
Error("Still waiting for DA check at slot end.")
log.WithFields(logrus.Fields{
"slot": signed.Block().Slot(),
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": len(missing),
}).Error("Still waiting for blobs DA check at slot end.")
})
defer nst.Stop()
}
@@ -591,12 +621,104 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
}
}
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": missing,
func (s *Service) isDataAvailableDataColumns(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
if signed.Version() < version.Deneb {
return nil
}
block := signed.Block()
if block == nil {
return errors.New("invalid nil beacon block")
}
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
return nil
}
body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "could not get KZG commitments")
}
// If the block has no commitments there is nothing to wait for.
if len(kzgCommitments) == 0 {
return nil
}
custodiedSubnetCount := params.BeaconConfig().CustodyRequirement
if flags.Get().SubscribeToAllSubnets {
custodiedSubnetCount = params.BeaconConfig().DataColumnSidecarSubnetCount
}
colMap, err := peerdas.CustodyColumns(s.cfg.P2P.NodeID(), custodiedSubnetCount)
if err != nil {
return err
}
// Expected is the number of custodied data columns a node is expected to have.
expected := len(colMap)
if expected == 0 {
return nil
}
// Subscribe to newly stored data columns in the database.
rootIndexChan := make(chan filesystem.RootIndexPair)
subscription := s.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
defer subscription.Unsubscribe()
// Get a map of data column indices that are not currently available.
missing, err := missingDataColumns(s.blobStorage, root, colMap)
if err != nil {
return err
}
// If there are no missing indices, all data column sidecars are available.
// This is the happy path.
if len(missing) == 0 {
return nil
}
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
nst := time.AfterFunc(time.Until(nextSlot), func() {
if len(missing) == 0 {
return
}
log.WithFields(logrus.Fields{
"slot": signed.Block().Slot(),
"root": fmt.Sprintf("%#x", root),
"columnsExpected": expected,
"columnsWaiting": len(missing),
}).Error("Still waiting for data columns DA check at slot end.")
})
defer nst.Stop()
}
for {
select {
case rootIndex := <-rootIndexChan:
if rootIndex.Root != root {
// This is not the root we are looking for.
continue
}
// Remove the index from the missing map.
delete(missing, rootIndex.Index)
// Exit if there are no more missing data columns.
if len(missing) == 0 {
return nil
}
case <-ctx.Done():
missingIndexes := make([]uint64, 0, len(missing))
for val := range missing {
copiedVal := val
missingIndexes = append(missingIndexes, copiedVal)
}
return errors.Wrapf(ctx.Err(), "context deadline waiting for data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndexes)
}
}
}
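isDataAvailableDataColumns boils down to a set difference followed by a wait loop: subtract the already-stored column indices from the node's custody set, then drain a notification channel until nothing is missing or the context expires. A simplified, self-contained sketch of that loop; the notification type and waitForColumns name are hypothetical, not Prysm's types.

```go
package da

import (
	"context"
	"fmt"
)

// notification pairs a block root with a column index that was just stored.
type notification struct {
	root  [32]byte
	index uint64
}

// waitForColumns blocks until every index in missing has been seen on notif
// for the given root, or until ctx is cancelled.
func waitForColumns(ctx context.Context, root [32]byte, missing map[uint64]bool, notif <-chan notification) error {
	for len(missing) > 0 {
		select {
		case n := <-notif:
			if n.root != root {
				continue // notification for a different block
			}
			delete(missing, n.index)
		case <-ctx.Done():
			return fmt.Errorf("still missing %d columns: %w", len(missing), ctx.Err())
		}
	}
	return nil
}
```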
@@ -824,7 +824,7 @@ func TestRemoveBlockAttestationsInPool(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, r))
atts := make([]interfaces.Attestation, len(b.Block.Body.Attestations))
atts := make([]ethpb.Att, len(b.Block.Body.Attestations))
for i, a := range b.Block.Body.Attestations {
atts[i] = a
}
@@ -1963,68 +1963,130 @@ func TestNoViableHead_Reboot(t *testing.T) {
}
func TestOnBlock_HandleBlockAttestations(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
t.Run("pre-Electra", func(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
require.NoError(t, service.saveGenesisData(ctx, st))
require.NoError(t, service.saveGenesisData(ctx, st))
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a := wsb.Block().Body().Attestations()[0]
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a := wsb.Block().Body().Attestations()[0]
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a3 := wsb3.Block().Body().Attestations()[0]
r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a3 := wsb3.Block().Body().Attestations()[0]
r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
})
t.Run("post-Electra", func(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
st, keys := util.DeterministicGenesisStateElectra(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
genesis, err := blocks.NewGenesisBlockForState(ctx, st)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, genesis), "Could not save genesis block")
parentRoot, err := genesis.Block().HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlockElectra(st3, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a := wsb.Block().Body().Attestations()[0]
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a3 := wsb3.Block().Body().Attestations()[0]
r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
})
}
func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
@@ -10,7 +10,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -32,7 +31,7 @@ type AttestationStateFetcher interface {
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
type AttestationReceiver interface {
AttestationStateFetcher
VerifyLmdFfgConsistency(ctx context.Context, att interfaces.Attestation) error
VerifyLmdFfgConsistency(ctx context.Context, att ethpb.Att) error
InForkchoice([32]byte) bool
}
@@ -52,7 +51,7 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
}
// VerifyLmdFfgConsistency verifies that attestation's LMD and FFG votes are consistency to each other.
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a interfaces.Attestation) error {
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a ethpb.Att) error {
r, err := s.TargetRootForEpoch([32]byte(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)
if err != nil {
return err
@@ -207,7 +206,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
// 1. Validate attestation, update validator's latest vote
// 2. Apply fork choice to the processed attestation
// 3. Save latest head info
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att interfaces.Attestation, disparity time.Duration) error {
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att ethpb.Att, disparity time.Duration) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.receiveAttestationNoPubsub")
defer span.End()
@@ -10,7 +10,6 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -74,7 +73,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
require.NoError(t, service.saveGenesisData(ctx, genesisState))
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
tRoot := bytesutil.ToBytes32(atts[0].GetData().Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
@@ -84,7 +83,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
attsToSave := make([]interfaces.Attestation, len(atts))
attsToSave := make([]ethpb.Att, len(atts))
for i, a := range atts {
attsToSave[i] = a
}
@@ -126,14 +125,14 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
// Generate attestations for this block in Slot 1
atts, err := util.GenerateAttestations(copied, pks, 1, 1, false)
require.NoError(t, err)
attsToSave := make([]interfaces.Attestation, len(atts))
attsToSave := make([]ethpb.Att, len(atts))
for i, a := range atts {
attsToSave[i] = a
}
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(attsToSave))
// Verify the target is in forkchoice
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot)))
require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot))
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].GetData().BeaconBlockRoot)))
require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].GetData().BeaconBlockRoot))
require.Equal(t, true, fcs.HasNode(service.originBlockRoot))
// Insert a new block to forkchoice
@@ -50,9 +50,15 @@ type BlobReceiver interface {
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}
// DataColumnReceiver interface defines the methods of chain service for receiving new
// data columns
type DataColumnReceiver interface {
ReceiveDataColumn(context.Context, blocks.VerifiedRODataColumn) error
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashing interfaces.AttesterSlashing)
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
}
// ReceiveBlock is a function that defines the operations (minus pubsub)
@@ -295,10 +301,10 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
}
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing interfaces.AttesterSlashing) {
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
s.InsertSlashingsToForkChoiceStore(ctx, []interfaces.AttesterSlashing{slashing})
s.InsertSlashingsToForkChoiceStore(ctx, []ethpb.AttSlashing{slashing})
}
// prunePostBlockOperationPools only runs on new head otherwise should return a nil.
@@ -479,12 +485,12 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
// is done in the background to avoid adding more load to this critical code path.
ctx := context.TODO()
for _, att := range signed.Block().Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.GetData().Slot, att.GetData().CommitteeIndex)
committees, err := helpers.AttestationCommittees(ctx, preState, att)
if err != nil {
log.WithError(err).Error("Could not get attestation committee")
log.WithError(err).Error("Could not get attestation committees")
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committees...)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
return
16 beacon-chain/blockchain/receive_data_column.go (Normal file)
@@ -0,0 +1,16 @@
package blockchain
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)
func (s *Service) ReceiveDataColumn(ctx context.Context, ds blocks.VerifiedRODataColumn) error {
if err := s.blobStorage.SaveDataColumn(ds); err != nil {
return errors.Wrap(err, "save data column")
}
return nil
}
@@ -82,7 +82,7 @@ type config struct {
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2p p2p.Broadcaster
P2P p2p.Acceser
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
@@ -107,15 +107,17 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
sync.RWMutex
notifiers map[[32]byte]chan uint64
seenIndex map[[32]byte][fieldparams.MaxBlobsPerBlock]bool
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
}
// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
if idx >= fieldparams.MaxBlobsPerBlock {
return
}
// TODO: Separate Data Columns from blobs
/*
if idx >= fieldparams.MaxBlobsPerBlock {
return
}*/
bn.Lock()
seen := bn.seenIndex[root]
@@ -129,7 +131,7 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
// Retrieve or create the notifier channel for the given root.
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
bn.notifiers[root] = c
}
@@ -143,7 +145,7 @@ func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
defer bn.Unlock()
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
bn.notifiers[root] = c
}
return c
@@ -169,7 +171,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
bn := &blobNotifierMap{
notifiers: make(map[[32]byte]chan uint64),
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
}
srv := &Service{
ctx: ctx,
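The blobNotifierMap changes resize the per-root bookkeeping from MaxBlobsPerBlock to NumberOfColumns so that one channel can absorb a notification for every possible data column without blocking. A generic sketch of that per-root notifier pattern with a configurable capacity; the names are illustrative, not Prysm's.

```go
package notify

import "sync"

// indexNotifier keeps one buffered channel per block root, sized so that
// notifying every distinct index (blob or data column) never blocks.
type indexNotifier struct {
	sync.Mutex
	capacity  int
	notifiers map[[32]byte]chan uint64
	seen      map[[32]byte]map[uint64]bool
}

func newIndexNotifier(capacity int) *indexNotifier {
	return &indexNotifier{
		capacity:  capacity,
		notifiers: make(map[[32]byte]chan uint64),
		seen:      make(map[[32]byte]map[uint64]bool),
	}
}

// notify records idx for root and pushes it to the root's channel at most once.
func (n *indexNotifier) notify(root [32]byte, idx uint64) {
	n.Lock()
	defer n.Unlock()
	if n.seen[root] == nil {
		n.seen[root] = make(map[uint64]bool)
	}
	if n.seen[root][idx] {
		return // already notified for this index
	}
	n.seen[root][idx] = true
	c, ok := n.notifiers[root]
	if !ok {
		c = make(chan uint64, n.capacity)
		n.notifiers[root] = c
	}
	c <- idx
}
```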
@@ -95,7 +95,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithAttestationPool(attestations.NewPool()),
WithSlashingPool(slashings.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithP2PBroadcaster(&mockAccesser{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fc),
WithAttestationService(attService),
@@ -518,7 +518,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
// Initialize a blobNotifierMap
bn := &blobNotifierMap{
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
notifiers: make(map[[32]byte]chan uint64),
}
@@ -13,14 +13,15 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
testDB "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"google.golang.org/protobuf/proto"
@@ -45,12 +46,17 @@ type mockBroadcaster struct {
broadcastCalled bool
}
type mockAccesser struct {
mockBroadcaster
p2pTesting.MockPeerManager
}
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ interfaces.Attestation) error {
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ ethpb.Att) error {
mb.broadcastCalled = true
return nil
}
@@ -65,6 +71,11 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}
@@ -121,6 +132,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
WithSyncChecker(mock.MockChecker{}),
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
}
// append the variadic opts so they override the defaults by being processed afterwards
opts = append(defOpts, opts...)
@@ -414,7 +414,7 @@ func (*ChainService) HeadGenesisValidatorsRoot() [32]byte {
}

// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a interfaces.Attestation) error {
func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a ethpb.Att) error {
	if !bytes.Equal(a.GetData().BeaconBlockRoot, a.GetData().Target.Root) {
		return errors.New("LMD and FFG miss matched")
	}
@@ -495,7 +495,7 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
}

// ReceiveAttesterSlashing mocks the same method in the chain service.
func (*ChainService) ReceiveAttesterSlashing(context.Context, interfaces.AttesterSlashing) {}
func (*ChainService) ReceiveAttesterSlashing(context.Context, ethpb.AttSlashing) {}

// IsFinalized mocks the same method in the chain service.
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool {
@@ -628,6 +628,11 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
	return nil
}

// ReceiveDataColumn implements the same method in chain service
func (c *ChainService) ReceiveDataColumn(_ context.Context, _ blocks.VerifiedRODataColumn) error {
	return nil
}

// TargetRootForEpoch mocks the same method in the chain service
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
	return c.TargetRoot, nil
@@ -2,7 +2,6 @@ package testing

import (
	"context"
	"math/big"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/api/client/builder"
@@ -55,13 +54,13 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
		}
		return w, nil, s.ErrSubmitBlindedBlock
	case version.Capella:
		w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, big.NewInt(0))
		w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella)
		if err != nil {
			return nil, nil, errors.Wrap(err, "could not wrap capella payload")
		}
		return w, nil, s.ErrSubmitBlindedBlock
	case version.Deneb:
		w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, big.NewInt(0))
		w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb)
		if err != nil {
			return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
		}
beacon-chain/cache/BUILD.bazel (vendored, 1 change)
@@ -8,6 +8,7 @@ go_library(
        "attestation_data.go",
        "balance_cache_key.go",
        "checkpoint_state.go",
        "column_subnet_ids.go",
        "committee.go",
        "committee_disabled.go",  # keep
        "committees.go",
beacon-chain/cache/column_subnet_ids.go (vendored, new file, 65 lines)
@@ -0,0 +1,65 @@
package cache

import (
	"sync"
	"time"

	"github.com/patrickmn/go-cache"
	"github.com/prysmaticlabs/prysm/v5/config/params"
)

type columnSubnetIDs struct {
	colSubCache *cache.Cache
	colSubLock  sync.RWMutex
}

// ColumnSubnetIDs for column subnet participants
var ColumnSubnetIDs = newColumnSubnetIDs()

const columnKey = "columns"

func newColumnSubnetIDs() *columnSubnetIDs {
	epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
	// Set the default duration of a column subnet subscription as the column expiry period.
	subLength := epochDuration * time.Duration(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
	persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
	return &columnSubnetIDs{colSubCache: persistentCache}
}

// GetColumnSubnets retrieves the data column subnets.
func (s *columnSubnetIDs) GetColumnSubnets() ([]uint64, bool, time.Time) {
	s.colSubLock.RLock()
	defer s.colSubLock.RUnlock()

	id, duration, ok := s.colSubCache.GetWithExpiration(columnKey)
	if !ok {
		return nil, false, time.Time{}
	}
	// Retrieve indices from the cache.
	idxs, ok := id.([]uint64)
	if !ok {
		return nil, false, time.Time{}
	}

	return idxs, ok, duration
}

// AddColumnSubnets adds the relevant data column subnets.
func (s *columnSubnetIDs) AddColumnSubnets(colIdx []uint64) {
	s.colSubLock.Lock()
	defer s.colSubLock.Unlock()

	s.colSubCache.Set(columnKey, colIdx, 0)
}

// EmptyAllCaches empties out all the related caches and flushes any stored
// entries on them. This should only ever be used for testing, in normal
// production, handling of the relevant subnets for each role is done
// separately.
func (s *columnSubnetIDs) EmptyAllCaches() {
	// Clear the cache.
	s.colSubLock.Lock()
	defer s.colSubLock.Unlock()

	s.colSubCache.Flush()
}
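A brief usage sketch for the new cache above. The wrapper function below is illustrative and not part of the change; only ColumnSubnetIDs, AddColumnSubnets, and GetColumnSubnets come from the file itself.

// refreshColumnSubnets is a hypothetical caller: reuse the cached subnet indices while the
// subscription window is still open, otherwise store a fresh set, which expires after the
// default duration configured in newColumnSubnetIDs.
func refreshColumnSubnets(wanted []uint64) []uint64 {
	if ids, ok, _ := cache.ColumnSubnetIDs.GetColumnSubnets(); ok {
		return ids
	}
	cache.ColumnSubnetIDs.AddColumnSubnets(wanted)
	return wanted
}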
@@ -48,7 +48,7 @@ func ProcessAttestationsNoVerifySignature(
func ProcessAttestationNoVerifySignature(
	ctx context.Context,
	beaconState state.BeaconState,
	att interfaces.Attestation,
	att ethpb.Att,
	totalBalance uint64,
) (state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "altair.ProcessAttestationNoVerifySignature")
@@ -66,11 +66,11 @@ func ProcessAttestationNoVerifySignature(
	if err != nil {
		return nil, err
	}
	committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
	committees, err := helpers.AttestationCommittees(ctx, beaconState, att)
	if err != nil {
		return nil, err
	}
	indices, err := attestation.AttestingIndices(att, committee)
	indices, err := attestation.AttestingIndices(att, committees...)
	if err != nil {
		return nil, err
	}
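The hunk above captures the migration that recurs throughout this change set: committee lookup goes through helpers.AttestationCommittees and AttestingIndices becomes variadic, because an Electra attestation can reference several committees through its committee bits while older attestations still map to exactly one. A hedged sketch of such an attestation follows; the helper and the sizes slice are illustrative, while the types and constructors are the ones used by the tests later in this diff.

// buildTwoCommitteeAtt is illustrative only. It produces an Electra attestation whose
// committee bits select committees 0 and 1, so helpers.AttestationCommittees returns two
// committees and AttestingIndices(att, committees...) resolves the aggregation bits across
// their concatenation.
func buildTwoCommitteeAtt(committeeSizes []uint64) *ethpb.AttestationElectra {
	cb := primitives.NewAttestationCommitteeBits()
	cb.SetBitAt(0, true)
	cb.SetBitAt(1, true)
	return util.HydrateAttestationElectra(&ethpb.AttestationElectra{
		AggregationBits: bitfield.NewBitlist(committeeSizes[0] + committeeSizes[1]),
		CommitteeBits:   cb,
	})
}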
@@ -195,47 +195,95 @@ func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessAttestations_OK(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
|
||||
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(0, true)
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
att := util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(0, true)
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
att := util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
})
|
||||
|
||||
cfc := beaconState.CurrentJustifiedCheckpoint()
|
||||
cfc.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
|
||||
require.NoError(t, err)
|
||||
attestingIndices, err := attestation.AttestingIndices(att, committee)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices))
|
||||
for i, indice := range attestingIndices {
|
||||
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
block := util.NewBeaconBlockAltair()
|
||||
block.Block.Body.Attestations = []*ethpb.Attestation{att}
|
||||
|
||||
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run("post-Electra", func(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateElectra(t, 100)
|
||||
|
||||
cfc := beaconState.CurrentJustifiedCheckpoint()
|
||||
cfc.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(0, true)
|
||||
committeeBits := primitives.NewAttestationCommitteeBits()
|
||||
committeeBits.SetBitAt(0, true)
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
att := util.HydrateAttestationElectra(ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
CommitteeBits: committeeBits,
|
||||
})
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices, err := attestation.AttestingIndices(att, committee)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices))
|
||||
for i, indice := range attestingIndices {
|
||||
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
|
||||
cfc := beaconState.CurrentJustifiedCheckpoint()
|
||||
cfc.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
attestingIndices, err := attestation.AttestingIndices(att, committee)
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
sigs := make([]bls.Signature, len(attestingIndices))
|
||||
for i, indice := range attestingIndices {
|
||||
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
block := util.NewBeaconBlockAltair()
|
||||
block.Block.Body.Attestations = []*ethpb.Attestation{att}
|
||||
block := util.NewBeaconBlockElectra()
|
||||
block.Block.Body.Attestations = []*ethpb.AttestationElectra{att}
|
||||
|
||||
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
|
||||
|
||||
@@ -154,7 +154,7 @@ func TranslateParticipation(ctx context.Context, state state.BeaconState, atts [
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@ func ProcessAttestationsNoVerifySignature(
|
||||
func VerifyAttestationNoVerifySignature(
|
||||
ctx context.Context,
|
||||
beaconState state.ReadOnlyBeaconState,
|
||||
att interfaces.Attestation,
|
||||
att ethpb.Att,
|
||||
) error {
|
||||
ctx, span := trace.StartSpan(ctx, "core.VerifyAttestationNoVerifySignature")
|
||||
defer span.End()
|
||||
@@ -133,7 +133,7 @@ func VerifyAttestationNoVerifySignature(
|
||||
return errors.New("committee index must be 0 post-Electra")
|
||||
}
|
||||
|
||||
committeeIndices := att.GetCommitteeBitsVal().BitIndices()
|
||||
committeeIndices := att.CommitteeBitsVal().BitIndices()
|
||||
committees := make([][]primitives.ValidatorIndex, len(committeeIndices))
|
||||
participantsCount := 0
|
||||
var err error
|
||||
@@ -164,7 +164,7 @@ func VerifyAttestationNoVerifySignature(
|
||||
func ProcessAttestationNoVerifySignature(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
att interfaces.Attestation,
|
||||
att ethpb.Att,
|
||||
) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.ProcessAttestationNoVerifySignature")
|
||||
defer span.End()
|
||||
@@ -200,23 +200,6 @@ func ProcessAttestationNoVerifySignature(
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// VerifyAttestationSignature converts and attestation into an indexed attestation and verifies
|
||||
// the signature in that attestation.
|
||||
func VerifyAttestationSignature(ctx context.Context, beaconState state.ReadOnlyBeaconState, att interfaces.Attestation) error {
|
||||
if err := helpers.ValidateNilAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return VerifyIndexedAttestation(ctx, beaconState, indexedAtt)
|
||||
}
|
||||
|
||||
// VerifyIndexedAttestation determines the validity of an indexed attestation.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -464,7 +463,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
|
||||
sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
|
||||
list := bitfield.Bitlist{0b11111}
|
||||
var atts []interfaces.Attestation
|
||||
var atts []ethpb.Att
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
atts = append(atts, ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
@@ -480,7 +479,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
_, err := blocks.AttestationSignatureBatch(context.Background(), beaconState, atts)
|
||||
assert.ErrorContains(t, want, err)
|
||||
|
||||
atts = []interfaces.Attestation{}
|
||||
atts = []ethpb.Att{}
|
||||
list = bitfield.Bitlist{0b10000}
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
atts = append(atts, ðpb.Attestation{
|
||||
@@ -579,53 +578,109 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
|
||||
}
|
||||
}
|
||||
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(5))
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(5))
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
|
||||
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
})
|
||||
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err)
|
||||
var sigs []bls.Signature
|
||||
for i, u := range comm1 {
|
||||
att1.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
})
|
||||
root, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
require.NoError(t, err)
|
||||
sigs = nil
|
||||
for i, u := range comm2 {
|
||||
att2.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
set, err := blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
|
||||
require.NoError(t, err)
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
|
||||
})
|
||||
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err)
|
||||
var sigs []bls.Signature
|
||||
for i, u := range comm1 {
|
||||
att1.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
t.Run("post-Electra", func(t *testing.T) {
|
||||
st, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(5))
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
commBits1 := primitives.NewAttestationCommitteeBits()
|
||||
commBits1.SetBitAt(0, true)
|
||||
att1 := util.HydrateAttestationElectra(ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
CommitteeBits: commBits1,
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
})
|
||||
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err)
|
||||
var sigs []bls.Signature
|
||||
for i, u := range comm1 {
|
||||
att1.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
commBits2 := primitives.NewAttestationCommitteeBits()
|
||||
commBits2.SetBitAt(1, true)
|
||||
att2 := util.HydrateAttestationElectra(ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
CommitteeBits: commBits2,
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
})
|
||||
root, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
require.NoError(t, err)
|
||||
sigs = nil
|
||||
for i, u := range comm2 {
|
||||
att2.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
set, err := blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
|
||||
require.NoError(t, err)
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
|
||||
})
|
||||
root, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
require.NoError(t, err)
|
||||
sigs = nil
|
||||
for i, u := range comm2 {
|
||||
att2.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
set, err := blocks.AttestationSignatureBatch(ctx, st, []interfaces.Attestation{att1, att2})
|
||||
require.NoError(t, err)
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
|
||||
}
|
||||
|
||||
func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
|
||||
@@ -685,6 +740,6 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
_, err = blocks.AttestationSignatureBatch(ctx, st, []interfaces.Attestation{att1, att2})
|
||||
_, err = blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -39,7 +38,7 @@ import (
|
||||
func ProcessAttesterSlashings(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
slashings []interfaces.AttesterSlashing,
|
||||
slashings []ethpb.AttSlashing,
|
||||
slashFunc slashValidatorFunc,
|
||||
) (state.BeaconState, error) {
|
||||
var err error
|
||||
@@ -56,7 +55,7 @@ func ProcessAttesterSlashings(
|
||||
func ProcessAttesterSlashing(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
slashing interfaces.AttesterSlashing,
|
||||
slashing ethpb.AttSlashing,
|
||||
slashFunc slashValidatorFunc,
|
||||
) (state.BeaconState, error) {
|
||||
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
|
||||
@@ -91,18 +90,18 @@ func ProcessAttesterSlashing(
|
||||
}
|
||||
|
||||
// VerifyAttesterSlashing validates the attestation data in both attestations in the slashing object.
|
||||
func VerifyAttesterSlashing(ctx context.Context, beaconState state.ReadOnlyBeaconState, slashing interfaces.AttesterSlashing) error {
|
||||
func VerifyAttesterSlashing(ctx context.Context, beaconState state.ReadOnlyBeaconState, slashing ethpb.AttSlashing) error {
|
||||
if slashing == nil {
|
||||
return errors.New("nil slashing")
|
||||
}
|
||||
if slashing.GetFirstAttestation() == nil || slashing.GetSecondAttestation() == nil {
|
||||
if slashing.FirstAttestation() == nil || slashing.SecondAttestation() == nil {
|
||||
return errors.New("nil attestation")
|
||||
}
|
||||
if slashing.GetFirstAttestation().GetData() == nil || slashing.GetSecondAttestation().GetData() == nil {
|
||||
if slashing.FirstAttestation().GetData() == nil || slashing.SecondAttestation().GetData() == nil {
|
||||
return errors.New("nil attestation data")
|
||||
}
|
||||
att1 := slashing.GetFirstAttestation()
|
||||
att2 := slashing.GetSecondAttestation()
|
||||
att1 := slashing.FirstAttestation()
|
||||
att2 := slashing.SecondAttestation()
|
||||
data1 := att1.GetData()
|
||||
data2 := att2.GetData()
|
||||
if !IsSlashableAttestationData(data1, data2) {
|
||||
@@ -144,11 +143,11 @@ func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
|
||||
}
|
||||
|
||||
// SlashableAttesterIndices returns the intersection of attester indices from both attestations in this slashing.
|
||||
func SlashableAttesterIndices(slashing interfaces.AttesterSlashing) []uint64 {
|
||||
if slashing == nil || slashing.GetFirstAttestation() == nil || slashing.GetSecondAttestation() == nil {
|
||||
func SlashableAttesterIndices(slashing ethpb.AttSlashing) []uint64 {
|
||||
if slashing == nil || slashing.FirstAttestation() == nil || slashing.SecondAttestation() == nil {
|
||||
return nil
|
||||
}
|
||||
indices1 := slashing.GetFirstAttestation().GetAttestingIndices()
|
||||
indices2 := slashing.GetSecondAttestation().GetAttestingIndices()
|
||||
indices1 := slashing.FirstAttestation().GetAttestingIndices()
|
||||
indices2 := slashing.SecondAttestation().GetAttestingIndices()
|
||||
return slice.IntersectionUint64(indices1, indices2)
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -58,7 +57,7 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
|
||||
AttesterSlashings: slashings,
|
||||
},
|
||||
}
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
@@ -97,7 +96,7 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
|
||||
},
|
||||
}
|
||||
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
@@ -153,7 +152,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
@@ -226,7 +225,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
@@ -299,7 +298,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
@@ -372,7 +371,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusCapella(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
@@ -217,7 +216,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
|
||||
fuzzer.Fuzz(a)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessAttesterSlashings(ctx, s, []interfaces.AttesterSlashing{a}, v.SlashValidator)
|
||||
r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.SlashValidator)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, a)
|
||||
}
|
||||
@@ -298,21 +297,6 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzVerifyAttestation_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconState{}
|
||||
attestation := ðpb.Attestation{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attestation)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
require.NoError(t, err)
|
||||
err = VerifyAttestationSignature(ctx, s, attestation)
|
||||
_ = err
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconState{}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -92,7 +91,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
|
||||
@@ -163,7 +163,42 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
|
||||
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{ // Deneb difference.
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
ReceiptsRoot: make([]byte, 32),
|
||||
LogsBloom: make([]byte, 256),
|
||||
PrevRandao: make([]byte, 32),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
Transactions: make([][]byte, 0),
|
||||
Withdrawals: make([]*enginev1.Withdrawal, 0),
|
||||
},
|
||||
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
|
||||
BlobKzgCommitments: make([][]byte, 0),
|
||||
},
|
||||
},
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
case *ethpb.BeaconStateElectra:
|
||||
return blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockElectra{
|
||||
Block: ðpb.BeaconBlockElectra{
|
||||
ParentRoot: params.BeaconConfig().ZeroHash[:],
|
||||
StateRoot: root[:],
|
||||
Body: ðpb.BeaconBlockBodyElectra{
|
||||
RandaoReveal: make([]byte, 96),
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
Graffiti: make([]byte, 32),
|
||||
SyncAggregate: ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadElectra{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
@@ -610,7 +609,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
require.NoError(t, err)
|
||||
payload.PrevRandao = random
|
||||
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, big.NewInt(0))
|
||||
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload)
|
||||
require.NoError(t, err)
|
||||
_, err = blocks.ProcessPayload(st, wrapped)
|
||||
require.NoError(t, err)
|
||||
@@ -874,7 +873,7 @@ func emptyPayloadHeaderCapella() (interfaces.ExecutionData, error) {
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
|
||||
}, big.NewInt(0))
|
||||
})
|
||||
}
|
||||
|
||||
func emptyPayload() *enginev1.ExecutionPayload {
|
||||
|
||||
@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
|
||||
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
|
||||
}
|
||||
|
||||
func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
|
||||
currentEpoch := slots.ToEpoch(header.Header.Slot)
|
||||
fork, err := forks.Fork(currentEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
|
||||
}
|
||||
|
||||
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
|
||||
// from the above method by not using fork data from the state and instead retrieving it
|
||||
// via the respective epoch.
|
||||
@@ -179,7 +197,7 @@ func randaoSigningData(ctx context.Context, beaconState state.ReadOnlyBeaconStat
|
||||
func createAttestationSignatureBatch(
|
||||
ctx context.Context,
|
||||
beaconState state.ReadOnlyBeaconState,
|
||||
atts []interfaces.Attestation,
|
||||
atts []ethpb.Att,
|
||||
domain []byte,
|
||||
) (*bls.SignatureBatch, error) {
|
||||
if len(atts) == 0 {
|
||||
@@ -192,11 +210,11 @@ func createAttestationSignatureBatch(
|
||||
descs := make([]string, len(atts))
|
||||
for i, a := range atts {
|
||||
sigs[i] = a.GetSignature()
|
||||
c, err := helpers.BeaconCommitteeFromState(ctx, beaconState, a.GetData().Slot, a.GetData().CommitteeIndex)
|
||||
committees, err := helpers.AttestationCommittees(ctx, beaconState, a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ia, err := attestation.ConvertToIndexed(ctx, a, c)
|
||||
ia, err := attestation.ConvertToIndexed(ctx, a, committees...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -233,7 +251,7 @@ func createAttestationSignatureBatch(
|
||||
|
||||
// AttestationSignatureBatch retrieves all the related attestation signature data such as the relevant public keys,
|
||||
// signatures and attestation signing data and collate it into a signature batch object.
|
||||
func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []interfaces.Attestation) (*bls.SignatureBatch, error) {
|
||||
func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []ethpb.Att) (*bls.SignatureBatch, error) {
|
||||
if len(atts) == 0 {
|
||||
return bls.NewSet(), nil
|
||||
}
|
||||
@@ -243,8 +261,8 @@ func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBe
|
||||
dt := params.BeaconConfig().DomainBeaconAttester
|
||||
|
||||
// Split attestations by fork. Note: the signature domain will differ based on the fork.
|
||||
var preForkAtts []interfaces.Attestation
|
||||
var postForkAtts []interfaces.Attestation
|
||||
var preForkAtts []ethpb.Att
|
||||
var postForkAtts []ethpb.Att
|
||||
for _, a := range atts {
|
||||
if slots.ToEpoch(a.GetData().Slot) < fork.Epoch {
|
||||
preForkAtts = append(preForkAtts, a)
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
@@ -643,10 +642,7 @@ func TestProcessBlindWithdrawals(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
wdRoot, err := ssz.WithdrawalSliceRoot(test.Args.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
require.NoError(t, err)
|
||||
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(
|
||||
&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]},
|
||||
big.NewInt(0),
|
||||
)
|
||||
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]})
|
||||
require.NoError(t, err)
|
||||
post, err := blocks.ProcessWithdrawals(st, p)
|
||||
if test.Control.ExpectedError {
|
||||
@@ -1064,7 +1060,7 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
}
|
||||
st, err := prepareValidators(spb, test.Args)
|
||||
require.NoError(t, err)
|
||||
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, big.NewInt(0))
|
||||
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals})
|
||||
require.NoError(t, err)
|
||||
post, err := blocks.ProcessWithdrawals(st, p)
|
||||
if test.Control.ExpectedError {
|
||||
|
||||
@@ -3,18 +3,22 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"balance_deposits.go",
|
||||
"churn.go",
|
||||
"consolidations.go",
|
||||
"deposits.go",
|
||||
"effective_balance_updates.go",
|
||||
"registry_updates.go",
|
||||
"transition.go",
|
||||
"upgrade.go",
|
||||
"validator.go",
|
||||
"withdrawals.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
@@ -36,9 +40,10 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"balance_deposits_test.go",
|
||||
"churn_test.go",
|
||||
"consolidations_test.go",
|
||||
"deposits_test.go",
|
||||
"effective_balance_updates_test.go",
|
||||
"upgrade_test.go",
|
||||
"validator_test.go",
|
||||
],
|
||||
@@ -55,7 +60,6 @@ go_test(
|
||||
"//crypto/bls/blst:go_default_library",
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/interop:go_default_library",
|
||||
|
||||
@@ -37,7 +37,7 @@ import (
|
||||
// state.earliest_consolidation_epoch = earliest_consolidation_epoch
|
||||
//
|
||||
// return state.earliest_consolidation_epoch
|
||||
func ComputeConsolidationEpochAndUpdateChurn(ctx context.Context, s state.BeaconState, consolidationBalance math.Gwei) (primitives.Epoch, error) {
|
||||
func ComputeConsolidationEpochAndUpdateChurn(ctx context.Context, s state.BeaconState, consolidationBalance primitives.Gwei) (primitives.Epoch, error) {
|
||||
earliestEpoch, err := s.EarliestConsolidationEpoch()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
@@ -47,10 +47,10 @@ func ComputeConsolidationEpochAndUpdateChurn(ctx context.Context, s state.Beacon
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
perEpochConsolidationChurn := helpers.ConsolidationChurnLimit(math.Gwei(activeBal))
|
||||
perEpochConsolidationChurn := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
|
||||
|
||||
// New epoch for consolidations.
|
||||
var consolidationBalanceToConsume math.Gwei
|
||||
var consolidationBalanceToConsume primitives.Gwei
|
||||
if earliestEpoch < earliestConsolidationEpoch {
|
||||
consolidationBalanceToConsume = perEpochConsolidationChurn
|
||||
} else {
|
||||
@@ -70,7 +70,7 @@ func ComputeConsolidationEpochAndUpdateChurn(ctx context.Context, s state.Beacon
|
||||
}
|
||||
additionalEpochs++
|
||||
earliestConsolidationEpoch += primitives.Epoch(additionalEpochs)
|
||||
consolidationBalanceToConsume += math.Gwei(additionalEpochs) * perEpochConsolidationChurn
|
||||
consolidationBalanceToConsume += primitives.Gwei(additionalEpochs) * perEpochConsolidationChurn
|
||||
}
|
||||
|
||||
// Consume the balance and update state variables.
|
||||
|
||||
@@ -9,14 +9,13 @@ import (
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func createValidatorsWithTotalActiveBalance(totalBal math.Gwei) []*eth.Validator {
|
||||
num := totalBal / math.Gwei(params.BeaconConfig().MinActivationBalance)
|
||||
func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Validator {
|
||||
num := totalBal / primitives.Gwei(params.BeaconConfig().MinActivationBalance)
|
||||
vals := make([]*eth.Validator, num)
|
||||
for i := range vals {
|
||||
vals[i] = ð.Validator{
|
||||
@@ -25,7 +24,7 @@ func createValidatorsWithTotalActiveBalance(totalBal math.Gwei) []*eth.Validator
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
}
|
||||
}
|
||||
if totalBal%math.Gwei(params.BeaconConfig().MinActivationBalance) != 0 {
|
||||
if totalBal%primitives.Gwei(params.BeaconConfig().MinActivationBalance) != 0 {
|
||||
vals = append(vals, ð.Validator{
|
||||
ActivationEpoch: primitives.Epoch(0),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
@@ -41,9 +40,9 @@ func TestComputeConsolidationEpochAndUpdateChurn(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
consolidationBalance math.Gwei
|
||||
consolidationBalance primitives.Gwei
|
||||
expectedEpoch primitives.Epoch
|
||||
expectedConsolidationBalanceToConsume math.Gwei
|
||||
expectedConsolidationBalanceToConsume primitives.Gwei
|
||||
}{
|
||||
{
|
||||
name: "compute consolidation with no consolidation balance",
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -166,7 +166,7 @@ func ProcessConsolidations(ctx context.Context, st state.BeaconState, cs []*ethp
|
||||
return err
|
||||
}
|
||||
|
||||
if helpers.ConsolidationChurnLimit(math.Gwei(totalBalance)) <= math.Gwei(params.BeaconConfig().MinActivationBalance) {
|
||||
if helpers.ConsolidationChurnLimit(primitives.Gwei(totalBalance)) <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
|
||||
return errors.New("too little available consolidation churn limit")
|
||||
}
|
||||
|
||||
@@ -240,7 +240,7 @@ func ProcessConsolidations(ctx context.Context, st state.BeaconState, cs []*ethp
|
||||
return errors.New("consolidation signature verification failed")
|
||||
}
|
||||
|
||||
sEE, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, math.Gwei(source.EffectiveBalance))
|
||||
sEE, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(source.EffectiveBalance))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -6,7 +6,8 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -32,7 +33,7 @@ import (
|
||||
// state.deposit_balance_to_consume = Gwei(0)
|
||||
// else:
|
||||
// state.deposit_balance_to_consume = available_for_processing - processed_amount
|
||||
func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, activeBalance math.Gwei) error {
|
||||
func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, activeBalance primitives.Gwei) error {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessPendingBalanceDeposits")
|
||||
defer span.End()
|
||||
|
||||
@@ -54,13 +55,13 @@ func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, ac
|
||||
}
|
||||
|
||||
for _, deposit := range deposits {
|
||||
if math.Gwei(deposit.Amount) > availableForProcessing {
|
||||
if primitives.Gwei(deposit.Amount) > availableForProcessing {
|
||||
break
|
||||
}
|
||||
if err := helpers.IncreaseBalance(st, deposit.Index, deposit.Amount); err != nil {
|
||||
return err
|
||||
}
|
||||
availableForProcessing -= math.Gwei(deposit.Amount)
|
||||
availableForProcessing -= primitives.Gwei(deposit.Amount)
|
||||
nextDepositIndex++
|
||||
}
|
||||
|
||||
@@ -75,3 +76,12 @@ func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, ac
|
||||
return st.SetDepositBalanceToConsume(availableForProcessing)
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessDepositReceipts is a function as part of electra to process execution layer deposits
|
||||
func ProcessDepositReceipts(ctx context.Context, beaconState state.BeaconState, receipts []*enginev1.DepositReceipt) (state.BeaconState, error) {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessDepositReceipts")
|
||||
defer span.End()
|
||||
// TODO: replace with 6110 logic
|
||||
// return b.ProcessDepositReceipts(beaconState, receipts)
|
||||
return beaconState, nil
|
||||
}
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
@@ -36,7 +35,7 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
res, err := st.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, math.Gwei(0), res)
|
||||
require.Equal(t, primitives.Gwei(0), res)
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -59,7 +58,7 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
res, err := st.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, math.Gwei(100), res)
|
||||
require.Equal(t, primitives.Gwei(100), res)
|
||||
// Validators 0..9 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 10; i++ {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
@@ -93,7 +92,7 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
res, err := st.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, math.Gwei(0), res)
|
||||
require.Equal(t, primitives.Gwei(0), res)
|
||||
// Validators 0..4 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 4; i++ {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
@@ -119,7 +118,7 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
tab, err = helpers.TotalActiveBalance(tt.state)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
err = electra.ProcessPendingBalanceDeposits(context.TODO(), tt.state, math.Gwei(tab))
|
||||
err = electra.ProcessPendingBalanceDeposits(context.TODO(), tt.state, primitives.Gwei(tab))
|
||||
require.Equal(t, tt.wantErr, err != nil, "wantErr=%v, got err=%s", tt.wantErr, err)
|
||||
if tt.check != nil {
|
||||
tt.check(t, tt.state)
|
||||
beacon-chain/core/electra/effective_balance_updates.go (new file, 65 lines)
@@ -0,0 +1,65 @@
package electra

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// ProcessEffectiveBalanceUpdates processes effective balance updates during epoch processing.
//
// Spec pseudocode definition:
//
//	def process_effective_balance_updates(state: BeaconState) -> None:
//	    # Update effective balances with hysteresis
//	    for index, validator in enumerate(state.validators):
//	        balance = state.balances[index]
//	        HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT)
//	        DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
//	        UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
//	        EFFECTIVE_BALANCE_LIMIT = (
//	            MAX_EFFECTIVE_BALANCE_EIP7251 if has_compounding_withdrawal_credential(validator)
//	            else MIN_ACTIVATION_BALANCE
//	        )
//
//	        if (
//	            balance + DOWNWARD_THRESHOLD < validator.effective_balance
//	            or validator.effective_balance + UPWARD_THRESHOLD < balance
//	        ):
//	            validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, EFFECTIVE_BALANCE_LIMIT)
func ProcessEffectiveBalanceUpdates(state state.BeaconState) error {
	effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
	hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
	downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
	upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier

	bals := state.Balances()

	// Update effective balances with hysteresis.
	validatorFunc := func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error) {
		if val == nil {
			return false, nil, fmt.Errorf("validator %d is nil in state", idx)
		}
		if idx >= len(bals) {
			return false, nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
		}
		balance := bals[idx]

		effectiveBalanceLimit := params.BeaconConfig().MinActivationBalance
		if helpers.HasCompoundingWithdrawalCredential(val) {
			effectiveBalanceLimit = params.BeaconConfig().MaxEffectiveBalanceElectra
		}

		if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance {
			effectiveBal := min(balance-balance%effBalanceInc, effectiveBalanceLimit)
			val.EffectiveBalance = effectiveBal
			return false, val, nil
		}
		return false, val, nil
	}

	return state.ApplyToEveryValidator(validatorFunc)
}
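For orientation, the thresholds computed above translate into concrete numbers. The figures in the comments below assume the usual mainnet constants (1 ETH effective balance increment, hysteresis quotient 4, downward multiplier 1, upward multiplier 5); they are not stated by this diff, and the helper itself is illustrative only.

// effectiveBalanceThresholds is an illustrative helper, not part of the change.
func effectiveBalanceThresholds() (downward, upward uint64) {
	cfg := params.BeaconConfig()
	hysteresisInc := cfg.EffectiveBalanceIncrement / cfg.HysteresisQuotient // 0.25 ETH assuming mainnet values
	downward = hysteresisInc * cfg.HysteresisDownwardMultiplier             // 0.25 ETH assuming mainnet values
	upward = hysteresisInc * cfg.HysteresisUpwardMultiplier                 // 1.25 ETH assuming mainnet values
	return downward, upward
}

// With those assumed values, a validator whose effective balance sits at 32 ETH is only adjusted
// once its actual balance drops below 31.75 ETH or rises above 33.25 ETH, and the new effective
// balance is rounded down to a whole increment and capped at MinActivationBalance (or at
// MaxEffectiveBalanceElectra for compounding withdrawal credentials), as selected above.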
beacon-chain/core/electra/effective_balance_updates_test.go (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestProcessEffectiveBalnceUpdates(t *testing.T) {
|
||||
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
|
||||
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
|
||||
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
wantErr bool
|
||||
check func(*testing.T, state.BeaconState)
|
||||
}{
|
||||
{
|
||||
name: "validator with compounding withdrawal credentials updates effective balance",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MaxEffectiveBalanceElectra * 2,
|
||||
},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, bs state.BeaconState) {
|
||||
val, err := bs.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalanceElectra, val.EffectiveBalance)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validator without compounding withdrawal credentials updates effective balance",
|
||||
state: func() state.BeaconState {
|
||||
pb := &eth.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance / 2,
|
||||
WithdrawalCredentials: nil,
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MaxEffectiveBalanceElectra,
|
||||
},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, bs state.BeaconState) {
|
||||
val, err := bs.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validator effective balance moves only when outside of threshold",
|
||||
state: func() state.BeaconState {
|
||||
pb := &eth.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinActivationBalance - downwardThreshold - 1, // beyond downward threshold
|
||||
params.BeaconConfig().MinActivationBalance - downwardThreshold + 1, // within downward threshold
|
||||
params.BeaconConfig().MinActivationBalance + upwardThreshold + 1, // beyond upward threshold
|
||||
params.BeaconConfig().MinActivationBalance + upwardThreshold - 1, // within upward threshold
|
||||
},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, bs state.BeaconState) {
|
||||
// validator 0 has a balance diff exceeding the threshold so a diff should be applied to
|
||||
// effective balance and it moves by effective balance increment.
|
||||
val, err := bs.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance-params.BeaconConfig().EffectiveBalanceIncrement, val.EffectiveBalance)
|
||||
|
||||
// validator 1 has a balance diff within the threshold so the effective balance should not
|
||||
// have changed.
|
||||
val, err = bs.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
|
||||
|
||||
// Validator 2 has a balance diff exceeding the threshold so a diff should be applied to the
|
||||
// effective balance and it moves by effective balance increment.
|
||||
val, err = bs.ValidatorAtIndex(2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+params.BeaconConfig().EffectiveBalanceIncrement, val.EffectiveBalance)
|
||||
|
||||
// Validator 3 has a balance diff within the threshold so the effective balance should not
|
||||
// have changed.
|
||||
val, err = bs.ValidatorAtIndex(3)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := electra.ProcessEffectiveBalanceUpdates(tt.state)
|
||||
require.Equal(t, tt.wantErr, err != nil, "unexpected error result: wantErr=%t, got err=%v", tt.wantErr, err)
|
||||
if tt.check != nil {
|
||||
tt.check(t, tt.state)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
beacon-chain/core/electra/registry_updates.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package electra

import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
)

// ProcessRegistryUpdates rotates validators in and out of the active pool.
// The number of validators rotated is bounded by the churn limit.
//
// Spec pseudocode definition:
//
//  def process_registry_updates(state: BeaconState) -> None:
//      # Process activation eligibility and ejections
//      for index, validator in enumerate(state.validators):
//          if is_eligible_for_activation_queue(validator):
//              validator.activation_eligibility_epoch = get_current_epoch(state) + 1
//
//          if (
//              is_active_validator(validator, get_current_epoch(state))
//              and validator.effective_balance <= EJECTION_BALANCE
//          ):
//              initiate_validator_exit(state, ValidatorIndex(index))
//
//      # Activate all eligible validators
//      activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
//      for validator in state.validators:
//          if is_eligible_for_activation(state, validator):
//              validator.activation_epoch = activation_epoch
func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
    // TODO: replace with real implementation
    return state, nil
}
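Note: the Go body above is still a TODO stub. The following is a rough, self-contained sketch of the pseudocode's logic against a simplified validator struct; the constants passed as parameters and the collapsed initiate_validator_exit are assumptions for illustration, not the eventual Prysm implementation.

package example

// registryValidator is a simplified mirror of the spec's validator record; illustration only.
type registryValidator struct {
    ActivationEligibilityEpoch uint64
    ActivationEpoch            uint64
    ExitEpoch                  uint64
    EffectiveBalance           uint64
}

const farFutureEpoch = ^uint64(0)

// processRegistryUpdates mirrors the pseudocode above. ejectionBalance, minActivationBalance
// and maxSeedLookahead stand in for spec constants, and the churn-aware exit-epoch
// computation of initiate_validator_exit is collapsed into a single assignment.
func processRegistryUpdates(vals []*registryValidator, currentEpoch, finalizedEpoch, ejectionBalance, minActivationBalance, maxSeedLookahead uint64) {
    activationExitEpoch := currentEpoch + 1 + maxSeedLookahead // compute_activation_exit_epoch

    for _, v := range vals {
        // Queue validators that just became eligible for activation.
        if v.ActivationEligibilityEpoch == farFutureEpoch && v.EffectiveBalance >= minActivationBalance {
            v.ActivationEligibilityEpoch = currentEpoch + 1
        }
        // Eject active validators whose effective balance dropped too low.
        isActive := v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch
        if isActive && v.EffectiveBalance <= ejectionBalance {
            v.ExitEpoch = activationExitEpoch // stand-in for initiate_validator_exit
        }
    }
    // Activate validators whose eligibility has been finalized.
    for _, v := range vals {
        if v.ActivationEpoch == farFutureEpoch && v.ActivationEligibilityEpoch <= finalizedEpoch {
            v.ActivationEpoch = activationExitEpoch
        }
    }
}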
@@ -1,8 +1,15 @@
package electra

import (
    "context"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
    e "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch/precompute"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "go.opencensus.io/trace"
)

// Re-exports for methods that haven't changed in Electra.
@@ -21,3 +28,98 @@ var (
    AttestationsDelta    = altair.AttestationsDelta
    ProcessSyncAggregate = altair.ProcessSyncAggregate
)

// ProcessEpoch describes the per-epoch operations that are performed on the beacon state.
// It is optimized by pre-computing validator attestation records and epoch total/attested balances upfront.
//
// Spec definition:
//
//  def process_epoch(state: BeaconState) -> None:
//      process_justification_and_finalization(state)
//      process_inactivity_updates(state)
//      process_rewards_and_penalties(state)
//      process_registry_updates(state)
//      process_slashings(state)
//      process_eth1_data_reset(state)
//      process_pending_balance_deposits(state)  # New in EIP7251
//      process_pending_consolidations(state)  # New in EIP7251
//      process_effective_balance_updates(state)
//      process_slashings_reset(state)
//      process_randao_mixes_reset(state)
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
    _, span := trace.StartSpan(ctx, "electra.ProcessEpoch")
    defer span.End()

    if state == nil || state.IsNil() {
        return nil, errors.New("nil state")
    }
    vp, bp, err := InitializePrecomputeValidators(ctx, state)
    if err != nil {
        return nil, err
    }
    vp, bp, err = ProcessEpochParticipation(ctx, state, bp, vp)
    if err != nil {
        return nil, err
    }
    state, err = precompute.ProcessJustificationAndFinalizationPreCompute(state, bp)
    if err != nil {
        return nil, errors.Wrap(err, "could not process justification")
    }
    state, vp, err = ProcessInactivityScores(ctx, state, vp)
    if err != nil {
        return nil, errors.Wrap(err, "could not process inactivity updates")
    }
    state, err = ProcessRewardsAndPenaltiesPrecompute(state, bp, vp)
    if err != nil {
        return nil, errors.Wrap(err, "could not process rewards and penalties")
    }
    state, err = ProcessRegistryUpdates(ctx, state)
    if err != nil {
        return nil, errors.Wrap(err, "could not process registry updates")
    }
    proportionalSlashingMultiplier, err := state.ProportionalSlashingMultiplier()
    if err != nil {
        return nil, err
    }
    state, err = ProcessSlashings(state, proportionalSlashingMultiplier)
    if err != nil {
        return nil, err
    }
    state, err = ProcessEth1DataReset(state)
    if err != nil {
        return nil, err
    }
    if err = ProcessPendingBalanceDeposits(ctx, state, primitives.Gwei(bp.ActiveCurrentEpoch)); err != nil {
        return nil, err
    }
    if err := ProcessPendingConsolidations(ctx, state); err != nil {
        return nil, err
    }
    if err := ProcessEffectiveBalanceUpdates(state); err != nil {
        return nil, err
    }
    state, err = ProcessSlashingsReset(state)
    if err != nil {
        return nil, err
    }
    state, err = ProcessRandaoMixesReset(state)
    if err != nil {
        return nil, err
    }
    state, err = ProcessHistoricalDataUpdate(state)
    if err != nil {
        return nil, err
    }

    state, err = ProcessParticipationFlagUpdates(state)
    if err != nil {
        return nil, err
    }

    state, err = ProcessSyncCommitteeUpdates(ctx, state)
    if err != nil {
        return nil, err
    }

    return state, nil
}
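Note: a minimal sketch of how a per-fork ProcessEpoch like this is typically driven from the slot transition. The package name, the hard-coded slots-per-epoch value, and the wrapper itself are hypothetical; only electra.ProcessEpoch comes from this diff.

package example

import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
)

// maybeProcessEpoch runs the Electra epoch processing only when the state sits on the
// last slot of an epoch. slotsPerEpoch is hard-coded for illustration; real transition
// code reads it from params.BeaconConfig().
const slotsPerEpoch = 32

func maybeProcessEpoch(ctx context.Context, st state.BeaconState) (state.BeaconState, error) {
    if (uint64(st.Slot())+1)%slotsPerEpoch != 0 {
        return st, nil // not an epoch boundary, nothing to do
    }
    return electra.ProcessEpoch(ctx, st)
}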
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
@@ -271,9 +270,9 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
|
||||
|
||||
DepositReceiptsStartIndex: params.BeaconConfig().UnsetDepositReceiptsStartIndex,
|
||||
DepositBalanceToConsume: 0,
|
||||
ExitBalanceToConsume: helpers.ActivationExitChurnLimit(math.Gwei(tab)),
|
||||
ExitBalanceToConsume: helpers.ActivationExitChurnLimit(primitives.Gwei(tab)),
|
||||
EarliestExitEpoch: earliestExitEpoch,
|
||||
ConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(math.Gwei(tab)),
|
||||
ConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(primitives.Gwei(tab)),
|
||||
EarliestConsolidationEpoch: helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
|
||||
PendingBalanceDeposits: make([]*ethpb.PendingBalanceDeposit, 0),
|
||||
PendingPartialWithdrawals: make([]*ethpb.PendingPartialWithdrawal, 0),
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
@@ -152,14 +151,14 @@ func TestUpgradeToElectra(t *testing.T) {
|
||||
|
||||
balance, err := mSt.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, math.Gwei(0), balance)
|
||||
require.Equal(t, primitives.Gwei(0), balance)
|
||||
|
||||
tab, err := helpers.TotalActiveBalance(mSt)
|
||||
require.NoError(t, err)
|
||||
|
||||
ebtc, err := mSt.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, helpers.ActivationExitChurnLimit(math.Gwei(tab)), ebtc)
|
||||
require.Equal(t, helpers.ActivationExitChurnLimit(primitives.Gwei(tab)), ebtc)
|
||||
|
||||
eee, err := mSt.EarliestExitEpoch()
|
||||
require.NoError(t, err)
|
||||
@@ -167,7 +166,7 @@ func TestUpgradeToElectra(t *testing.T) {
|
||||
|
||||
cbtc, err := mSt.ConsolidationBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, helpers.ConsolidationChurnLimit(math.Gwei(tab)), cbtc)
|
||||
require.Equal(t, helpers.ConsolidationChurnLimit(primitives.Gwei(tab)), cbtc)
|
||||
|
||||
earliestConsolidationEpoch, err := mSt.EarliestConsolidationEpoch()
|
||||
require.NoError(t, err)
|
||||
|
||||
beacon-chain/core/electra/withdrawals.go (new file, 80 lines)
@@ -0,0 +1,80 @@
package electra

import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

// ProcessExecutionLayerWithdrawRequests processes execution-layer-triggered withdrawal
// requests from the provided execution payload and applies them to the beacon state.
//
// Spec pseudocode definition:
//
//  def process_execution_layer_withdrawal_request(
//      state: BeaconState,
//      execution_layer_withdrawal_request: ExecutionLayerWithdrawalRequest
//  ) -> None:
//      amount = execution_layer_withdrawal_request.amount
//      is_full_exit_request = amount == FULL_EXIT_REQUEST_AMOUNT
//
//      # If partial withdrawal queue is full, only full exits are processed
//      if len(state.pending_partial_withdrawals) == PENDING_PARTIAL_WITHDRAWALS_LIMIT and not is_full_exit_request:
//          return
//
//      validator_pubkeys = [v.pubkey for v in state.validators]
//      # Verify pubkey exists
//      request_pubkey = execution_layer_withdrawal_request.validator_pubkey
//      if request_pubkey not in validator_pubkeys:
//          return
//      index = ValidatorIndex(validator_pubkeys.index(request_pubkey))
//      validator = state.validators[index]
//
//      # Verify withdrawal credentials
//      has_correct_credential = has_execution_withdrawal_credential(validator)
//      is_correct_source_address = (
//          validator.withdrawal_credentials[12:] == execution_layer_withdrawal_request.source_address
//      )
//      if not (has_correct_credential and is_correct_source_address):
//          return
//      # Verify the validator is active
//      if not is_active_validator(validator, get_current_epoch(state)):
//          return
//      # Verify exit has not been initiated
//      if validator.exit_epoch != FAR_FUTURE_EPOCH:
//          return
//      # Verify the validator has been active long enough
//      if get_current_epoch(state) < validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
//          return
//
//      pending_balance_to_withdraw = get_pending_balance_to_withdraw(state, index)
//
//      if is_full_exit_request:
//          # Only exit validator if it has no pending withdrawals in the queue
//          if pending_balance_to_withdraw == 0:
//              initiate_validator_exit(state, index)
//          return
//
//      has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE
//      has_excess_balance = state.balances[index] > MIN_ACTIVATION_BALANCE + pending_balance_to_withdraw
//
//      # Only allow partial withdrawals with compounding withdrawal credentials
//      if has_compounding_withdrawal_credential(validator) and has_sufficient_effective_balance and has_excess_balance:
//          to_withdraw = min(
//              state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw,
//              amount
//          )
//          exit_queue_epoch = compute_exit_epoch_and_update_churn(state, to_withdraw)
//          withdrawable_epoch = Epoch(exit_queue_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
//          state.pending_partial_withdrawals.append(PendingPartialWithdrawal(
//              index=index,
//              amount=to_withdraw,
//              withdrawable_epoch=withdrawable_epoch,
//          ))
func ProcessExecutionLayerWithdrawRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.ExecutionLayerWithdrawalRequest) (state.BeaconState, error) {
    // TODO: replace with real implementation
    return st, nil
}
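Note: as with registry updates, the Go body is a TODO stub. Below is a rough, self-contained sketch of the request-validation flow from the pseudocode, using simplified types; the constants use mainnet values, FULL_EXIT_REQUEST_AMOUNT is taken as zero per the spec, and state mutation (exit churn, queue append) is deliberately left out.

package example

import "bytes"

// validator is a simplified stand-in for the beacon-state record; illustration only.
type validator struct {
    WithdrawalCredentials []byte // 32 bytes
    EffectiveBalance      uint64
    ActivationEpoch       uint64
    ExitEpoch             uint64
}

const (
    farFutureEpoch       = ^uint64(0)
    minActivationBalance = uint64(32_000_000_000) // gwei
    shardCommitteePeriod = uint64(256)
    eth1Prefix           = byte(0x01)
    compoundingPrefix    = byte(0x02)
)

// handleWithdrawalRequest mirrors the spec's checks and returns what should happen:
// "exit", "partial" (with the gwei amount), or "" to drop the request.
func handleWithdrawalRequest(v *validator, balance, pendingToWithdraw, currentEpoch, amount uint64, sourceAddr []byte) (string, uint64) {
    isFullExit := amount == 0 // FULL_EXIT_REQUEST_AMOUNT
    prefix := v.WithdrawalCredentials[0]
    if prefix != eth1Prefix && prefix != compoundingPrefix {
        return "", 0 // no execution withdrawal credential
    }
    if !bytes.Equal(v.WithdrawalCredentials[12:], sourceAddr) {
        return "", 0 // request not sent by the credential owner
    }
    active := v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch
    if !active || v.ExitEpoch != farFutureEpoch || currentEpoch < v.ActivationEpoch+shardCommitteePeriod {
        return "", 0
    }
    if isFullExit {
        if pendingToWithdraw == 0 {
            return "exit", 0
        }
        return "", 0
    }
    // Partial withdrawals require compounding credentials, full effective balance, and excess balance.
    if prefix == compoundingPrefix && v.EffectiveBalance >= minActivationBalance && balance > minActivationBalance+pendingToWithdraw {
        toWithdraw := min(balance-minActivationBalance-pendingToWithdraw, amount)
        return "partial", toWithdraw
    }
    return "", 0
}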
@@ -470,7 +470,7 @@ func UnslashedAttestingIndices(ctx context.Context, state state.ReadOnlyBeaconSt
	seen := make(map[uint64]bool)

	for _, att := range atts {
-		committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
+		committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
		if err != nil {
			return nil, err
		}
@@ -11,7 +11,6 @@ go_library(
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -3,7 +3,6 @@ package operation
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -33,12 +32,15 @@ const (
|
||||
|
||||
// AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
|
||||
AttesterSlashingReceived = 8
|
||||
|
||||
// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
|
||||
DataColumnSidecarReceived = 9
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
type UnAggregatedAttReceivedData struct {
|
||||
// Attestation is the unaggregated attestation object.
|
||||
Attestation interfaces.Attestation
|
||||
Attestation ethpb.Att
|
||||
}
|
||||
|
||||
// AggregatedAttReceivedData is the data sent with AggregatedAttReceived events.
|
||||
@@ -76,5 +78,9 @@ type ProposerSlashingReceivedData struct {
|
||||
|
||||
// AttesterSlashingReceivedData is the data sent with AttesterSlashingReceived events.
|
||||
type AttesterSlashingReceivedData struct {
|
||||
AttesterSlashing interfaces.AttesterSlashing
|
||||
AttesterSlashing ethpb.AttSlashing
|
||||
}
|
||||
|
||||
type DataColumnSidecarReceivedData struct {
|
||||
DataColumn *blocks.VerifiedRODataColumn
|
||||
}
|
||||
|
||||
@@ -34,6 +34,7 @@ go_library(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -73,12 +74,10 @@ go_test(
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -22,7 +21,7 @@ var (
|
||||
// ValidateNilAttestation checks if any composite field of input attestation is nil.
|
||||
// Access to these nil fields will result in run time panic,
|
||||
// it is recommended to run these checks as first line of defense.
|
||||
func ValidateNilAttestation(attestation interfaces.Attestation) error {
|
||||
func ValidateNilAttestation(attestation ethpb.Att) error {
|
||||
if attestation == nil {
|
||||
return errors.New("attestation can't be nil")
|
||||
}
|
||||
@@ -72,7 +71,7 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
|
||||
|
||||
// IsAggregated returns true if the attestation is an aggregated attestation,
|
||||
// false otherwise.
|
||||
func IsAggregated(attestation interfaces.Attestation) bool {
|
||||
func IsAggregated(attestation ethpb.Att) bool {
|
||||
return attestation.GetAggregationBits().Count() > 1
|
||||
}
|
||||
|
||||
@@ -91,7 +90,7 @@ func IsAggregated(attestation interfaces.Attestation) bool {
|
||||
// committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
|
||||
//
|
||||
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
|
||||
func ComputeSubnetForAttestation(activeValCount uint64, att interfaces.Attestation) uint64 {
|
||||
func ComputeSubnetForAttestation(activeValCount uint64, att ethpb.Att) uint64 {
|
||||
return ComputeSubnetFromCommitteeAndSlot(activeValCount, att.GetData().CommitteeIndex, att.GetData().Slot)
|
||||
}
|
||||
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
@@ -239,7 +238,7 @@ func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
|
||||
func TestValidateNilAttestation(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
attestation interfaces.Attestation
|
||||
attestation ethpb.Att
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
|
||||
@@ -15,12 +15,13 @@ import (
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/container/slice"
    "github.com/prysmaticlabs/prysm/v5/crypto/hash"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/math"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
)

@@ -58,6 +59,29 @@ func SlotCommitteeCount(activeValidatorCount uint64) uint64 {
    return committeesPerSlot
}

// AttestationCommittees returns the beacon state committees that correspond to the attestation's committee indices.
func AttestationCommittees(ctx context.Context, st state.ReadOnlyBeaconState, att ethpb.Att) ([][]primitives.ValidatorIndex, error) {
    var committees [][]primitives.ValidatorIndex
    if att.Version() >= version.Electra {
        committeeIndices := att.CommitteeBitsVal().BitIndices()
        committees = make([][]primitives.ValidatorIndex, len(committeeIndices))
        for i, ci := range committeeIndices {
            committee, err := BeaconCommitteeFromState(ctx, st, att.GetData().Slot, primitives.CommitteeIndex(ci))
            if err != nil {
                return nil, err
            }
            committees[i] = committee
        }
    } else {
        committee, err := BeaconCommitteeFromState(ctx, st, att.GetData().Slot, att.GetData().CommitteeIndex)
        if err != nil {
            return nil, err
        }
        committees = [][]primitives.ValidatorIndex{committee}
    }
    return committees, nil
}
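Note: a hypothetical call site for the new AttestationCommittees helper, showing that the same code path handles single-committee (pre-Electra) and committee-bits (Electra) attestations; the wrapper function and package are illustrative only.

package example

import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// attestingCommitteeSizes resolves every committee an attestation references and
// returns their sizes. AttestationCommittees branches on att.Version(), so the caller
// does not need to care whether the attestation is pre- or post-Electra.
func attestingCommitteeSizes(ctx context.Context, st state.ReadOnlyBeaconState, att ethpb.Att) ([]int, error) {
    committees, err := helpers.AttestationCommittees(ctx, st, att)
    if err != nil {
        return nil, err
    }
    sizes := make([]int, len(committees))
    for i, c := range committees {
        sizes[i] = len(c) // one entry per committee referenced by the attestation
    }
    return sizes, nil
}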
|
||||
// BeaconCommitteeFromState returns the crosslink committee of a given slot and committee index. This
|
||||
// is a spec implementation where state is used as an argument. In case of state retrieval
|
||||
// becomes expensive, consider using BeaconCommittee below.
|
||||
@@ -143,105 +167,137 @@ func BeaconCommittee(
|
||||
return ComputeCommittee(validatorIndices, seed, indexOffset, count)
|
||||
}
|
||||
|
||||
// CommitteeAssignmentContainer represents a committee list, committee index, and to be attested slot for a given epoch.
|
||||
type CommitteeAssignmentContainer struct {
|
||||
// CommitteeAssignment represents committee list, committee index, and to be attested slot for a given epoch.
|
||||
type CommitteeAssignment struct {
|
||||
Committee []primitives.ValidatorIndex
|
||||
AttesterSlot primitives.Slot
|
||||
CommitteeIndex primitives.CommitteeIndex
|
||||
}
|
||||
|
||||
// CommitteeAssignments is a map of validator indices pointing to the appropriate committee
|
||||
// assignment for the given epoch.
|
||||
//
|
||||
// 1. Determine the proposer validator index for each slot.
|
||||
// 2. Compute all committees.
|
||||
// 3. Determine the attesting slot for each committee.
|
||||
// 4. Construct a map of validator indices pointing to the respective committees.
|
||||
func CommitteeAssignments(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
epoch primitives.Epoch,
|
||||
) (map[primitives.ValidatorIndex]*CommitteeAssignmentContainer, map[primitives.ValidatorIndex][]primitives.Slot, error) {
|
||||
// verifyAssignmentEpoch verifies if the given epoch is valid for assignment based on the provided state.
|
||||
// It checks if the epoch is not greater than the next epoch, and if the start slot of the epoch is greater
|
||||
// than or equal to the minimum valid start slot calculated based on the state's current slot and historical roots.
|
||||
func verifyAssignmentEpoch(epoch primitives.Epoch, state state.BeaconState) error {
|
||||
nextEpoch := time.NextEpoch(state)
|
||||
if epoch > nextEpoch {
|
||||
return nil, nil, fmt.Errorf(
|
||||
"epoch %d can't be greater than next epoch %d",
|
||||
epoch,
|
||||
nextEpoch,
|
||||
)
|
||||
return fmt.Errorf("epoch %d can't be greater than next epoch %d", epoch, nextEpoch)
|
||||
}
|
||||
|
||||
// We determine the slots in which proposers are supposed to act.
|
||||
// Some validators may need to propose multiple times per epoch, so
|
||||
// we use a map of proposer idx -> []slot to keep track of this possibility.
|
||||
startSlot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
minValidStartSlot := primitives.Slot(0)
|
||||
if state.Slot() >= params.BeaconConfig().SlotsPerHistoricalRoot {
|
||||
minValidStartSlot = state.Slot() - params.BeaconConfig().SlotsPerHistoricalRoot
|
||||
if stateSlot := state.Slot(); stateSlot >= params.BeaconConfig().SlotsPerHistoricalRoot {
|
||||
minValidStartSlot = stateSlot - params.BeaconConfig().SlotsPerHistoricalRoot
|
||||
}
|
||||
if startSlot < minValidStartSlot {
|
||||
return nil, nil, fmt.Errorf("start slot %d is smaller than the minimum valid start slot %d", startSlot, minValidStartSlot)
|
||||
return fmt.Errorf("start slot %d is smaller than the minimum valid start slot %d", startSlot, minValidStartSlot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProposerAssignments calculates proposer assignments for each validator during the specified epoch.
|
||||
// It verifies the validity of the epoch, then iterates through each slot in the epoch to determine the
|
||||
// proposer for that slot and assigns them accordingly.
|
||||
func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch primitives.Epoch) (map[primitives.ValidatorIndex][]primitives.Slot, error) {
|
||||
// Verify if the epoch is valid for assignment based on the provided state.
|
||||
if err := verifyAssignmentEpoch(epoch, state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
startSlot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
proposerIndexToSlots := make(map[primitives.ValidatorIndex][]primitives.Slot, params.BeaconConfig().SlotsPerEpoch)
|
||||
proposerAssignments := make(map[primitives.ValidatorIndex][]primitives.Slot)
|
||||
|
||||
originalStateSlot := state.Slot()
|
||||
|
||||
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
|
||||
// Skip proposer assignment for genesis slot.
|
||||
if slot == 0 {
|
||||
continue
|
||||
}
|
||||
// Set the state's current slot.
|
||||
if err := state.SetSlot(slot); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Determine the proposer index for the current slot.
|
||||
i, err := BeaconProposerIndex(ctx, state)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
|
||||
return nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
|
||||
}
|
||||
proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot)
|
||||
|
||||
// Append the slot to the proposer's assignments.
|
||||
if _, ok := proposerAssignments[i]; !ok {
|
||||
proposerAssignments[i] = make([]primitives.Slot, 0)
|
||||
}
|
||||
proposerAssignments[i] = append(proposerAssignments[i], slot)
|
||||
}
|
||||
|
||||
// If previous proposer indices computation is outside if current proposal epoch range,
|
||||
// we need to reset state slot back to start slot so that we can compute the correct committees.
|
||||
currentProposalEpoch := epoch < nextEpoch
|
||||
if !currentProposalEpoch {
|
||||
if err := state.SetSlot(state.Slot() - params.BeaconConfig().SlotsPerEpoch); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Reset state back to its original slot.
|
||||
if err := state.SetSlot(originalStateSlot); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
activeValidatorIndices, err := ActiveValidatorIndices(ctx, state, epoch)
|
||||
return proposerAssignments, nil
|
||||
}
|
||||
|
||||
// CommitteeAssignments calculates committee assignments for each validator during the specified epoch.
|
||||
// It retrieves active validator indices, determines the number of committees per slot, and computes
|
||||
// assignments for each validator based on their presence in the provided validators slice.
|
||||
func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch primitives.Epoch, validators []primitives.ValidatorIndex) (map[primitives.ValidatorIndex]*CommitteeAssignment, error) {
|
||||
// Verify if the epoch is valid for assignment based on the provided state.
|
||||
if err := verifyAssignmentEpoch(epoch, state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Retrieve active validator count for the specified epoch.
|
||||
activeValidatorCount, err := ActiveValidatorCount(ctx, state, epoch)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
// Each slot in an epoch has a different set of committees. This value is derived from the
|
||||
// active validator set, which does not change.
|
||||
numCommitteesPerSlot := SlotCommitteeCount(uint64(len(activeValidatorIndices)))
|
||||
validatorIndexToCommittee := make(map[primitives.ValidatorIndex]*CommitteeAssignmentContainer, len(activeValidatorIndices))
|
||||
|
||||
// Compute all committees for all slots.
|
||||
for i := primitives.Slot(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
// Compute committees.
|
||||
// Determine the number of committees per slot based on the number of active validator indices.
|
||||
numCommitteesPerSlot := SlotCommitteeCount(activeValidatorCount)
|
||||
|
||||
startSlot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
|
||||
vals := make(map[primitives.ValidatorIndex]struct{})
|
||||
for _, v := range validators {
|
||||
vals[v] = struct{}{}
|
||||
}
|
||||
|
||||
// Compute committee assignments for each slot in the epoch.
|
||||
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
|
||||
// Compute committees for the current slot.
|
||||
for j := uint64(0); j < numCommitteesPerSlot; j++ {
|
||||
slot := startSlot + i
|
||||
committee, err := BeaconCommitteeFromState(ctx, state, slot, primitives.CommitteeIndex(j) /*committee index*/)
|
||||
committee, err := BeaconCommitteeFromState(ctx, state, slot, primitives.CommitteeIndex(j))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cac := &CommitteeAssignmentContainer{
|
||||
Committee: committee,
|
||||
CommitteeIndex: primitives.CommitteeIndex(j),
|
||||
AttesterSlot: slot,
|
||||
}
|
||||
for _, vIndex := range committee {
|
||||
validatorIndexToCommittee[vIndex] = cac
|
||||
if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
|
||||
continue
|
||||
}
|
||||
if _, ok := assignments[vIndex]; !ok {
|
||||
assignments[vIndex] = &CommitteeAssignment{}
|
||||
}
|
||||
assignments[vIndex].Committee = committee
|
||||
assignments[vIndex].AttesterSlot = slot
|
||||
assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return validatorIndexToCommittee, proposerIndexToSlots, nil
|
||||
return assignments, nil
|
||||
}
|
||||
|
||||
// VerifyBitfieldLength verifies that a bitfield length matches the given committee size.
|
||||
@@ -257,7 +313,7 @@ func VerifyBitfieldLength(bf bitfield.Bitfield, committeeSize uint64) error {
|
||||
|
||||
// VerifyAttestationBitfieldLengths verifies that an attestations aggregation bitfields is
|
||||
// a valid length matching the size of the committee.
|
||||
func VerifyAttestationBitfieldLengths(ctx context.Context, state state.ReadOnlyBeaconState, att interfaces.Attestation) error {
|
||||
func VerifyAttestationBitfieldLengths(ctx context.Context, state state.ReadOnlyBeaconState, att ethpb.Att) error {
|
||||
committee, err := BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve beacon committees")
|
||||
|
||||
@@ -104,7 +104,10 @@ func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
|
||||
Slot: 0, // Epoch 0.
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, _, err = helpers.CommitteeAssignments(context.Background(), state, epoch+1)
|
||||
_, err = helpers.CommitteeAssignments(context.Background(), state, epoch+1, nil)
|
||||
assert.ErrorContains(t, "can't be greater than next epoch", err)
|
||||
|
||||
_, err = helpers.ProposerAssignments(context.Background(), state, epoch+1)
|
||||
assert.ErrorContains(t, "can't be greater than next epoch", err)
|
||||
}
|
||||
|
||||
@@ -128,10 +131,10 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, 0)
|
||||
require.NoError(t, err, "Failed to determine CommitteeAssignments")
|
||||
for _, ss := range proposerIndexToSlots {
|
||||
for _, s := range ss {
|
||||
assignments, err := helpers.ProposerAssignments(context.Background(), state, 0)
|
||||
require.NoError(t, err, "Failed to determine Assignments")
|
||||
for _, slots := range assignments {
|
||||
for _, s := range slots {
|
||||
assert.NotEqual(t, uint64(0), s, "No proposer should be assigned to slot 0")
|
||||
}
|
||||
}
|
||||
@@ -140,6 +143,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
|
||||
func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
validatorIndices := make([]primitives.ValidatorIndex, len(validators))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
// First 2 epochs only half validators are activated.
|
||||
var activationEpoch primitives.Epoch
|
||||
@@ -150,6 +154,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
|
||||
ActivationEpoch: activationEpoch,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
validatorIndices[i] = primitives.ValidatorIndex(i)
|
||||
}
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
|
||||
@@ -201,14 +206,16 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
validatorIndexToCommittee, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
|
||||
require.NoError(t, err, "Failed to determine CommitteeAssignments")
|
||||
cac := validatorIndexToCommittee[tt.index]
|
||||
assignments, err := helpers.CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot), validatorIndices)
|
||||
require.NoError(t, err, "Failed to determine Assignments")
|
||||
cac := assignments[tt.index]
|
||||
assert.Equal(t, tt.committeeIndex, cac.CommitteeIndex, "Unexpected committeeIndex for validator index %d", tt.index)
|
||||
assert.Equal(t, tt.slot, cac.AttesterSlot, "Unexpected slot for validator index %d", tt.index)
|
||||
if len(proposerIndexToSlots[tt.index]) > 0 && proposerIndexToSlots[tt.index][0] != tt.proposerSlot {
|
||||
proposerAssignments, err := helpers.ProposerAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
|
||||
require.NoError(t, err)
|
||||
if len(proposerAssignments[tt.index]) > 0 && proposerAssignments[tt.index][0] != tt.proposerSlot {
|
||||
t.Errorf("wanted proposer slot %d, got proposer slot %d for validator index %d",
|
||||
tt.proposerSlot, proposerIndexToSlots[tt.index][0], tt.index)
|
||||
tt.proposerSlot, proposerAssignments[tt.index][0], tt.index)
|
||||
}
|
||||
assert.DeepEqual(t, tt.committee, cac.Committee, "Unexpected committee for validator index %d", tt.index)
|
||||
})
|
||||
@@ -238,13 +245,13 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, proposerIndxs, err := helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state))
|
||||
assignments, err := helpers.ProposerAssignments(context.Background(), state, time.CurrentEpoch(state))
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
|
||||
require.NotEqual(t, 0, len(assignments), "wanted non-zero proposer index set")
|
||||
|
||||
_, proposerIndxs, err = helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
|
||||
assignments, err = helpers.ProposerAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
|
||||
require.NotEqual(t, 0, len(assignments), "wanted non-zero proposer index set")
|
||||
}
|
||||
|
||||
func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
|
||||
@@ -264,7 +271,7 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, _, err = helpers.CommitteeAssignments(context.Background(), state, 0)
|
||||
_, err = helpers.CommitteeAssignments(context.Background(), state, 0, nil)
|
||||
require.ErrorContains(t, "start slot 0 is smaller than the minimum valid start slot 1", err)
|
||||
}
|
||||
|
||||
@@ -286,12 +293,12 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
epoch := primitives.Epoch(1)
|
||||
_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, epoch)
|
||||
require.NoError(t, err, "Failed to determine CommitteeAssignments")
|
||||
assignments, err := helpers.ProposerAssignments(context.Background(), state, epoch)
|
||||
require.NoError(t, err, "Failed to determine Assignments")
|
||||
|
||||
slotsWithProposers := make(map[primitives.Slot]bool)
|
||||
for _, proposerSlots := range proposerIndexToSlots {
|
||||
for _, slot := range proposerSlots {
|
||||
for _, slots := range assignments {
|
||||
for _, slot := range slots {
|
||||
slotsWithProposers[slot] = true
|
||||
}
|
||||
}
|
||||
@@ -708,3 +715,37 @@ func TestCommitteeIndices(t *testing.T) {
|
||||
indices := helpers.CommitteeIndices(bitfield)
|
||||
assert.DeepEqual(t, []primitives.CommitteeIndex{0, 1, 3}, indices)
|
||||
}
|
||||
|
||||
func TestAttestationCommittees(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = &ethpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
att := &ethpb.Attestation{Data: &ethpb.AttestationData{CommitteeIndex: 0}}
|
||||
committees, err := helpers.AttestationCommittees(context.Background(), state, att)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(committees))
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[0])))
|
||||
})
|
||||
t.Run("post-Electra", func(t *testing.T) {
|
||||
bits := primitives.NewAttestationCommitteeBits()
|
||||
bits.SetBitAt(0, true)
|
||||
bits.SetBitAt(1, true)
|
||||
att := &ethpb.AttestationElectra{CommitteeBits: bits, Data: &ethpb.AttestationData{}}
|
||||
committees, err := helpers.AttestationCommittees(context.Background(), state, att)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(committees))
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[0])))
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[1])))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@ package helpers

import (
	"github.com/prysmaticlabs/prysm/v5/config/params"
-	"github.com/prysmaticlabs/prysm/v5/math"
+	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// BalanceChurnLimit for the current active balance, in gwei.
@@ -19,12 +19,12 @@ import (
//	    get_total_active_balance(state) // CHURN_LIMIT_QUOTIENT
//	)
//	return churn - churn % EFFECTIVE_BALANCE_INCREMENT
-func BalanceChurnLimit(activeBalance math.Gwei) math.Gwei {
+func BalanceChurnLimit(activeBalance primitives.Gwei) primitives.Gwei {
	churn := max(
		params.BeaconConfig().MinPerEpochChurnLimitElectra,
		(uint64(activeBalance) / params.BeaconConfig().ChurnLimitQuotient),
	)
-	return math.Gwei(churn - churn%params.BeaconConfig().EffectiveBalanceIncrement)
+	return primitives.Gwei(churn - churn%params.BeaconConfig().EffectiveBalanceIncrement)
}

// ActivationExitChurnLimit for the current active balance, in gwei.
@@ -37,8 +37,8 @@ func BalanceChurnLimit(activeBalance math.Gwei) math.Gwei {
//	Return the churn limit for the current epoch dedicated to activations and exits.
//	"""
//	return min(MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT, get_balance_churn_limit(state))
-func ActivationExitChurnLimit(activeBalance math.Gwei) math.Gwei {
-	return min(math.Gwei(params.BeaconConfig().MaxPerEpochActivationExitChurnLimit), BalanceChurnLimit(activeBalance))
+func ActivationExitChurnLimit(activeBalance primitives.Gwei) primitives.Gwei {
+	return min(primitives.Gwei(params.BeaconConfig().MaxPerEpochActivationExitChurnLimit), BalanceChurnLimit(activeBalance))
}

// ConsolidationChurnLimit for the current active balance, in gwei.
@@ -48,6 +48,6 @@ func ActivationExitChurnLimit(activeBalance math.Gwei) math.Gwei {
//
//	def get_consolidation_churn_limit(state: BeaconState) -> Gwei:
//	    return get_balance_churn_limit(state) - get_activation_exit_churn_limit(state)
-func ConsolidationChurnLimit(activeBalance math.Gwei) math.Gwei {
+func ConsolidationChurnLimit(activeBalance primitives.Gwei) primitives.Gwei {
	return BalanceChurnLimit(activeBalance) - ActivationExitChurnLimit(activeBalance)
}
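Note: a standalone arithmetic check of the three limits above, assuming the mainnet EIP-7251 presets (MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA = 128 ETH, MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT = 256 ETH, CHURN_LIMIT_QUOTIENT = 65536, EFFECTIVE_BALANCE_INCREMENT = 1 ETH) and a made-up total active balance of 32M ETH.

package main

import "fmt"

func main() {
    const (
        gwei              = uint64(1_000_000_000)
        minChurn          = 128 * gwei
        maxActivationExit = 256 * gwei
        churnQuotient     = uint64(65536)
        increment         = 1 * gwei
    )
    totalActive := 32_000_000 * gwei

    churn := totalActive / churnQuotient
    if churn < minChurn {
        churn = minChurn
    }
    balanceChurn := churn - churn%increment // 488 ETH for this example

    activationExit := min(maxActivationExit, balanceChurn) // capped at 256 ETH
    consolidation := balanceChurn - activationExit         // 232 ETH left for consolidations

    fmt.Println(balanceChurn/gwei, activationExit/gwei, consolidation/gwei) // 488 256 232
}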
@@ -5,30 +5,30 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
)
|
||||
|
||||
func TestBalanceChurnLimit(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
activeBalance math.Gwei
|
||||
expected math.Gwei
|
||||
activeBalance primitives.Gwei
|
||||
expected primitives.Gwei
|
||||
}{
|
||||
{
|
||||
name: "less than MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA",
|
||||
activeBalance: 111,
|
||||
expected: math.Gwei(params.BeaconConfig().MinPerEpochChurnLimitElectra),
|
||||
expected: primitives.Gwei(params.BeaconConfig().MinPerEpochChurnLimitElectra),
|
||||
},
|
||||
{
|
||||
name: "modulo EFFECTIVE_BALANCE_INCREMENT",
|
||||
activeBalance: math.Gwei(111 + params.BeaconConfig().MinPerEpochChurnLimitElectra*params.BeaconConfig().ChurnLimitQuotient),
|
||||
expected: math.Gwei(params.BeaconConfig().MinPerEpochChurnLimitElectra),
|
||||
activeBalance: primitives.Gwei(111 + params.BeaconConfig().MinPerEpochChurnLimitElectra*params.BeaconConfig().ChurnLimitQuotient),
|
||||
expected: primitives.Gwei(params.BeaconConfig().MinPerEpochChurnLimitElectra),
|
||||
},
|
||||
{
|
||||
name: "more than MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA",
|
||||
activeBalance: math.Gwei(2000 * params.BeaconConfig().EffectiveBalanceIncrement * params.BeaconConfig().ChurnLimitQuotient),
|
||||
expected: math.Gwei(2000 * params.BeaconConfig().EffectiveBalanceIncrement),
|
||||
activeBalance: primitives.Gwei(2000 * params.BeaconConfig().EffectiveBalanceIncrement * params.BeaconConfig().ChurnLimitQuotient),
|
||||
expected: primitives.Gwei(2000 * params.BeaconConfig().EffectiveBalanceIncrement),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -42,18 +42,18 @@ func TestBalanceChurnLimit(t *testing.T) {
|
||||
func TestActivationExitChurnLimit(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
activeBalance math.Gwei
|
||||
expected math.Gwei
|
||||
activeBalance primitives.Gwei
|
||||
expected primitives.Gwei
|
||||
}{
|
||||
{
|
||||
name: "less than MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT",
|
||||
activeBalance: 1,
|
||||
expected: math.Gwei(params.BeaconConfig().MinPerEpochChurnLimitElectra),
|
||||
expected: primitives.Gwei(params.BeaconConfig().MinPerEpochChurnLimitElectra),
|
||||
},
|
||||
{
|
||||
name: "more than MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT",
|
||||
activeBalance: math.Gwei(2000 * params.BeaconConfig().EffectiveBalanceIncrement * params.BeaconConfig().ChurnLimitQuotient),
|
||||
expected: math.Gwei(params.BeaconConfig().MaxPerEpochActivationExitChurnLimit),
|
||||
activeBalance: primitives.Gwei(2000 * params.BeaconConfig().EffectiveBalanceIncrement * params.BeaconConfig().ChurnLimitQuotient),
|
||||
expected: primitives.Gwei(params.BeaconConfig().MaxPerEpochActivationExitChurnLimit),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -67,6 +67,6 @@ func TestActivationExitChurnLimit(t *testing.T) {
|
||||
// FuzzConsolidationChurnLimit exercises BalanceChurnLimit and ActivationExitChurnLimit
|
||||
func FuzzConsolidationChurnLimit(f *testing.F) {
|
||||
f.Fuzz(func(t *testing.T, activeBalance uint64) {
|
||||
helpers.ConsolidationChurnLimit(math.Gwei(activeBalance))
|
||||
helpers.ConsolidationChurnLimit(primitives.Gwei(activeBalance))
|
||||
})
|
||||
}
|
||||
|
||||
beacon-chain/core/peerdas/BUILD.bazel (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["helpers.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["helpers_test.go"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/core/peerdas/helpers.go (new file, 302 lines)
@@ -0,0 +1,302 @@
package peerdas

import (
    "encoding/binary"
    "math"

    cKzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/holiman/uint256"
    errors "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/crypto/hash"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// Bytes per cell
const bytesPerCell = cKzg4844.FieldElementsPerCell * cKzg4844.BytesPerFieldElement

var (
    // Custom errors
    errCustodySubnetCountTooLarge = errors.New("custody subnet count larger than data column sidecar subnet count")
    errIndexTooLarge              = errors.New("column index is larger than the specified number of columns")
    errMismatchLength             = errors.New("mismatch in the length of the commitments and proofs")

    // maxUint256 is the maximum value of a uint256.
    maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
)

// CustodyColumnSubnets computes the subnets the node should participate in for custody.
func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
    dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

    // Check if the custody subnet count is larger than the data column sidecar subnet count.
    if custodySubnetCount > dataColumnSidecarSubnetCount {
        return nil, errCustodySubnetCountTooLarge
    }

    // First, compute the subnet IDs that the node should participate in.
    subnetIds := make(map[uint64]bool, custodySubnetCount)

    one := uint256.NewInt(1)

    for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(subnetIds)) < custodySubnetCount; currentId.Add(currentId, one) {
        // Convert to big endian bytes.
        currentIdBytesBigEndian := currentId.Bytes32()

        // Convert to little endian.
        currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:])

        // Hash the result.
        hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)

        // Get the subnet ID.
        subnetId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % dataColumnSidecarSubnetCount

        // Add the subnet to the map.
        subnetIds[subnetId] = true

        // Overflow prevention.
        if currentId.Cmp(maxUint256) == 0 {
            currentId = uint256.NewInt(0)
        }
    }

    return subnetIds, nil
}

// CustodyColumns computes the columns the node should custody.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#helper-functions
func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
    dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

    // Compute the custodied subnets.
    subnetIds, err := CustodyColumnSubnets(nodeId, custodySubnetCount)
    if err != nil {
        return nil, errors.Wrap(err, "custody subnets")
    }

    columnsPerSubnet := cKzg4844.CellsPerExtBlob / dataColumnSidecarSubnetCount

    // Knowing the subnet ID and the number of columns per subnet, select all the columns the node should custody.
    // Columns belonging to the same subnet are contiguous.
    columnIndices := make(map[uint64]bool, custodySubnetCount*columnsPerSubnet)
    for i := uint64(0); i < columnsPerSubnet; i++ {
        for subnetId := range subnetIds {
            columnIndex := dataColumnSidecarSubnetCount*i + subnetId
            columnIndices[columnIndex] = true
        }
    }

    return columnIndices, nil
}
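Note: a hypothetical call site for CustodyColumns; the custody count of 4 mirrors the spec's CUSTODY_REQUIREMENT but is only an example here, and the wrapper is not part of this diff.

package example

import (
    "fmt"

    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
)

// printCustodyColumns derives the column indices this node must custody from its node ID.
func printCustodyColumns(id enode.ID) error {
    const custodySubnetCount = 4 // example value; the real count would come from configuration
    columns, err := peerdas.CustodyColumns(id, custodySubnetCount)
    if err != nil {
        return err
    }
    for column := range columns {
        fmt.Println("custody column:", column)
    }
    return nil
}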
|
||||
// DataColumnSidecars computes the data column sidecars from the signed block and blobs.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
|
||||
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []cKzg4844.Blob) ([]*ethpb.DataColumnSidecar, error) {
|
||||
blobsCount := len(blobs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the signed block header.
|
||||
signedBlockHeader, err := signedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "signed block header")
|
||||
}
|
||||
|
||||
// Get the block body.
|
||||
block := signedBlock.Block()
|
||||
blockBody := block.Body()
|
||||
|
||||
// Get the blob KZG commitments.
|
||||
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Compute the KZG commitments inclusion proof.
|
||||
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
|
||||
}
|
||||
|
||||
// Compute cells and proofs.
|
||||
cells := make([][cKzg4844.CellsPerExtBlob]cKzg4844.Cell, 0, blobsCount)
|
||||
proofs := make([][cKzg4844.CellsPerExtBlob]cKzg4844.KZGProof, 0, blobsCount)
|
||||
|
||||
for i := range blobs {
|
||||
blob := &blobs[i]
|
||||
blobCells, blobProofs, err := cKzg4844.ComputeCellsAndKZGProofs(blob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells and KZG proofs")
|
||||
}
|
||||
|
||||
cells = append(cells, blobCells)
|
||||
proofs = append(proofs, blobProofs)
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, cKzg4844.CellsPerExtBlob)
|
||||
for columnIndex := uint64(0); columnIndex < cKzg4844.CellsPerExtBlob; columnIndex++ {
|
||||
column := make([]cKzg4844.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]cKzg4844.KZGProof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cell := cells[rowIndex][columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofs[rowIndex][columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
cell := column[i]
|
||||
|
||||
cellBytes := make([]byte, 0, bytesPerCell)
|
||||
for _, fieldElement := range cell {
|
||||
copiedElem := fieldElement
|
||||
cellBytes = append(cellBytes, copiedElem[:]...)
|
||||
}
|
||||
|
||||
columnBytes = append(columnBytes, cellBytes)
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
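The nested loops in DataColumnSidecars are a row-to-column transpose: cells and proofs are computed per blob (one row each) and then regrouped by column index. A generic sketch of that regrouping, independent of the KZG types:

package main

import "fmt"

// transpose regroups a row-major matrix (one row per blob, one entry per cell)
// into columns, mirroring the row/column loops in DataColumnSidecars above.
func transpose[T any](rows [][]T) [][]T {
	if len(rows) == 0 {
		return nil
	}
	width := len(rows[0])
	cols := make([][]T, width)
	for c := 0; c < width; c++ {
		col := make([]T, 0, len(rows))
		for r := range rows {
			col = append(col, rows[r][c])
		}
		cols[c] = col
	}
	return cols
}

func main() {
	// Two "blobs" with four "cells" each; each column sidecar takes one cell per blob.
	rows := [][]string{{"a0", "a1", "a2", "a3"}, {"b0", "b1", "b2", "b3"}}
	fmt.Println(transpose(rows)) // [[a0 b0] [a1 b1] [a2 b2] [a3 b3]]
}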
|
||||
|
||||
// DataColumnSidecarsForReconstruct is a TEMPORARY function until there is an official specification for it.
|
||||
// It is scheduled for deletion.
|
||||
func DataColumnSidecarsForReconstruct(
|
||||
blobKzgCommitments [][]byte,
|
||||
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
|
||||
kzgCommitmentsInclusionProof [][]byte,
|
||||
blobs []cKzg4844.Blob,
|
||||
) ([]*ethpb.DataColumnSidecar, error) {
|
||||
blobsCount := len(blobs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute cells and proofs.
|
||||
cells := make([][cKzg4844.CellsPerExtBlob]cKzg4844.Cell, 0, blobsCount)
|
||||
proofs := make([][cKzg4844.CellsPerExtBlob]cKzg4844.KZGProof, 0, blobsCount)
|
||||
|
||||
for i := range blobs {
|
||||
blob := &blobs[i]
|
||||
blobCells, blobProofs, err := cKzg4844.ComputeCellsAndKZGProofs(blob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells and KZG proofs")
|
||||
}
|
||||
|
||||
cells = append(cells, blobCells)
|
||||
proofs = append(proofs, blobProofs)
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, cKzg4844.CellsPerExtBlob)
|
||||
for columnIndex := uint64(0); columnIndex < cKzg4844.CellsPerExtBlob; columnIndex++ {
|
||||
column := make([]cKzg4844.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]cKzg4844.KZGProof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cell := cells[rowIndex][columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofs[rowIndex][columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
cell := column[i]
|
||||
|
||||
cellBytes := make([]byte, 0, bytesPerCell)
|
||||
for _, fieldElement := range cell {
|
||||
copiedElem := fieldElement
|
||||
cellBytes = append(cellBytes, copiedElem[:]...)
|
||||
}
|
||||
|
||||
columnBytes = append(columnBytes, cellBytes)
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// VerifyDataColumnSidecarKZGProofs verifies the provided KZG Proofs for the particular
|
||||
// data column.
|
||||
func VerifyDataColumnSidecarKZGProofs(sc *ethpb.DataColumnSidecar) (bool, error) {
|
||||
if sc.ColumnIndex >= params.BeaconConfig().NumberOfColumns {
|
||||
return false, errIndexTooLarge
|
||||
}
|
||||
if len(sc.DataColumn) != len(sc.KzgCommitments) || len(sc.KzgCommitments) != len(sc.KzgProof) {
|
||||
return false, errMismatchLength
|
||||
}
|
||||
blobsCount := len(sc.DataColumn)
|
||||
|
||||
rowIdx := make([]uint64, 0, blobsCount)
|
||||
colIdx := make([]uint64, 0, blobsCount)
|
||||
for i := 0; i < len(sc.DataColumn); i++ {
|
||||
copiedI := uint64(i)
|
||||
rowIdx = append(rowIdx, copiedI)
|
||||
colI := sc.ColumnIndex
|
||||
colIdx = append(colIdx, colI)
|
||||
}
|
||||
ckzgComms := make([]cKzg4844.Bytes48, 0, len(sc.KzgCommitments))
|
||||
for _, com := range sc.KzgCommitments {
|
||||
ckzgComms = append(ckzgComms, cKzg4844.Bytes48(com))
|
||||
}
|
||||
var cells []cKzg4844.Cell
|
||||
for _, ce := range sc.DataColumn {
|
||||
var newCell []cKzg4844.Bytes32
|
||||
for i := 0; i < len(ce); i += 32 {
|
||||
newCell = append(newCell, cKzg4844.Bytes32(ce[i:i+32]))
|
||||
}
|
||||
cells = append(cells, cKzg4844.Cell(newCell))
|
||||
}
|
||||
var proofs []cKzg4844.Bytes48
|
||||
for _, p := range sc.KzgProof {
|
||||
proofs = append(proofs, cKzg4844.Bytes48(p))
|
||||
}
|
||||
return cKzg4844.VerifyCellKZGProofBatch(ckzgComms, rowIdx, colIdx, cells, proofs)
|
||||
}
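VerifyDataColumnSidecarKZGProofs rebuilds each cell by slicing the serialized column into 32-byte field elements. A small sketch of that chunking with an explicit length guard, which the loop above assumes implicitly:

package main

import "fmt"

// chunk32 splits a serialized cell into 32-byte field elements, as done when
// rebuilding cKzg4844.Cell values above. It rejects inputs whose length is not
// a multiple of 32 instead of assuming well-formed data.
func chunk32(b []byte) ([][32]byte, error) {
	if len(b)%32 != 0 {
		return nil, fmt.Errorf("length %d is not a multiple of 32", len(b))
	}
	out := make([][32]byte, 0, len(b)/32)
	for i := 0; i < len(b); i += 32 {
		var fe [32]byte
		copy(fe[:], b[i:i+32])
		out = append(out, fe)
	}
	return out, nil
}

func main() {
	chunks, err := chunk32(make([]byte, 64))
	fmt.Println(len(chunks), err) // 2 <nil>
}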
|
||||
91
beacon-chain/core/peerdas/helpers_test.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
ckzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func deterministicRandomness(seed int64) [32]byte {
|
||||
// Converts an int64 to a byte slice
|
||||
buf := new(bytes.Buffer)
|
||||
err := binary.Write(buf, binary.BigEndian, seed)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
|
||||
return [32]byte{}
|
||||
}
|
||||
bytes := buf.Bytes()
|
||||
|
||||
return sha256.Sum256(bytes)
|
||||
}
|
||||
|
||||
// Returns a serialized random field element in big-endian
|
||||
func GetRandFieldElement(seed int64) [32]byte {
|
||||
bytes := deterministicRandomness(seed)
|
||||
var r fr.Element
|
||||
r.SetBytes(bytes[:])
|
||||
|
||||
return GoKZG.SerializeScalar(r)
|
||||
}
|
||||
|
||||
// Returns a random blob using the passed seed as entropy
|
||||
func GetRandBlob(seed int64) ckzg4844.Blob {
|
||||
var blob ckzg4844.Blob
|
||||
bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
|
||||
for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
|
||||
fieldElementBytes := GetRandFieldElement(seed + int64(i))
|
||||
copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
|
||||
}
|
||||
return blob
|
||||
}
|
||||
|
||||
func GenerateCommitmentAndProof(blob ckzg4844.Blob) (ckzg4844.KZGCommitment, ckzg4844.KZGProof, error) {
|
||||
commitment, err := ckzg4844.BlobToKZGCommitment(&blob)
|
||||
if err != nil {
|
||||
return ckzg4844.KZGCommitment{}, ckzg4844.KZGProof{}, err
|
||||
}
|
||||
proof, err := ckzg4844.ComputeBlobKZGProof(&blob, ckzg4844.Bytes48(commitment))
|
||||
if err != nil {
|
||||
return ckzg4844.KZGCommitment{}, ckzg4844.KZGProof{}, err
|
||||
}
|
||||
return commitment, proof, err
|
||||
}
|
||||
|
||||
func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
comms := [][]byte{}
|
||||
blobs := []ckzg4844.Blob{}
|
||||
for i := int64(0); i < 6; i++ {
|
||||
blob := GetRandBlob(i)
|
||||
commitment, _, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
comms = append(comms, commitment[:])
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
dbBlock.Block.Body.BlobKzgCommitments = comms
|
||||
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
|
||||
require.NoError(t, err)
|
||||
sCars, err := peerdas.DataColumnSidecars(sBlock, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i, sidecar := range sCars {
|
||||
verified, err := peerdas.VerifyDataColumnSidecarKZGProofs(sidecar)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified, fmt.Sprintf("sidecar %d failed", i))
|
||||
}
|
||||
}
|
||||
@@ -256,14 +256,18 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, "could not process epoch with optimizations")
|
||||
}
|
||||
} else if state.Version() >= version.Altair {
|
||||
} else if state.Version() <= version.Deneb {
|
||||
state, err = altair.ProcessEpoch(ctx, state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, "could not process epoch")
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
|
||||
}
|
||||
} else {
|
||||
return nil, errors.New("beacon state should have a version")
|
||||
state, err = electra.ProcessEpoch(ctx, state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := state.SetSlot(state.Slot() + 1); err != nil {
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
b "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition/interop"
|
||||
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
@@ -223,19 +224,29 @@ func ProcessBlockNoVerifyAnySig(
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def process_operations(state: BeaconState, body: ReadOnlyBeaconBlockBody) -> None:
|
||||
// # Verify that outstanding deposits are processed up to the maximum number of deposits
|
||||
// assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)
|
||||
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||
// # [Modified in Electra:EIP6110]
|
||||
// # Disable former deposit mechanism once all prior deposits are processed
|
||||
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
|
||||
// if state.eth1_deposit_index < eth1_deposit_index_limit:
|
||||
// assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
|
||||
// else:
|
||||
// assert len(body.deposits) == 0
|
||||
//
|
||||
// def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
|
||||
// for operation in operations:
|
||||
// fn(state, operation)
|
||||
// def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
|
||||
// for operation in operations:
|
||||
// fn(state, operation)
|
||||
//
|
||||
// for_ops(body.proposer_slashings, process_proposer_slashing)
|
||||
// for_ops(body.attester_slashings, process_attester_slashing)
|
||||
// for_ops(body.attestations, process_attestation)
|
||||
// for_ops(body.deposits, process_deposit)
|
||||
// for_ops(body.voluntary_exits, process_voluntary_exit)
|
||||
// for_ops(body.proposer_slashings, process_proposer_slashing)
|
||||
// for_ops(body.attester_slashings, process_attester_slashing)
|
||||
// for_ops(body.attestations, process_attestation) # [Modified in Electra:EIP7549]
|
||||
// for_ops(body.deposits, process_deposit) # [Modified in Electra:EIP7251]
|
||||
// for_ops(body.voluntary_exits, process_voluntary_exit) # [Modified in Electra:EIP7251]
|
||||
// for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
|
||||
// # [New in Electra:EIP7002:EIP7251]
|
||||
// for_ops(body.execution_payload.withdrawal_requests, process_execution_layer_withdrawal_request)
|
||||
// for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) # [New in Electra:EIP6110]
|
||||
// for_ops(body.consolidations, process_consolidation) # [New in Electra:EIP7251]
|
||||
func ProcessOperationsNoVerifyAttsSigs(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
@@ -257,11 +268,16 @@ func ProcessOperationsNoVerifyAttsSigs(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case version.Altair, version.Bellatrix, version.Capella, version.Deneb, version.Electra:
|
||||
case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
|
||||
state, err = altairOperations(ctx, state, beaconBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case version.Electra:
|
||||
state, err = electraOperations(ctx, state, beaconBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("block does not have correct version")
|
||||
}
|
||||
@@ -378,6 +394,72 @@ func VerifyBlobCommitmentCount(blk interfaces.ReadOnlyBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// electraOperations
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||
// # [Modified in Electra:EIP6110]
|
||||
// # Disable former deposit mechanism once all prior deposits are processed
|
||||
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
|
||||
// if state.eth1_deposit_index < eth1_deposit_index_limit:
|
||||
// assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
|
||||
// else:
|
||||
// assert len(body.deposits) == 0
|
||||
//
|
||||
// def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
|
||||
// for operation in operations:
|
||||
// fn(state, operation)
|
||||
//
|
||||
// for_ops(body.proposer_slashings, process_proposer_slashing)
|
||||
// for_ops(body.attester_slashings, process_attester_slashing)
|
||||
// for_ops(body.attestations, process_attestation) # [Modified in Electra:EIP7549]
|
||||
// for_ops(body.deposits, process_deposit) # [Modified in Electra:EIP7251]
|
||||
// for_ops(body.voluntary_exits, process_voluntary_exit) # [Modified in Electra:EIP7251]
|
||||
// for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
|
||||
// # [New in Electra:EIP7002:EIP7251]
|
||||
// for_ops(body.execution_payload.withdrawal_requests, process_execution_layer_withdrawal_request)
|
||||
// for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) # [New in Electra:EIP6110]
|
||||
// for_ops(body.consolidations, process_consolidation) # [New in Electra:EIP7251]
|
||||
func electraOperations(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
|
||||
// 6110 validations are in VerifyOperationLengths
|
||||
|
||||
// Electra extends the altair operations.
|
||||
st, err := altairOperations(ctx, st, block)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b := block.Body()
|
||||
bod, ok := b.(interfaces.ROBlockBodyElectra)
|
||||
if !ok {
|
||||
return nil, errors.New("could not cast block body to electra block body")
|
||||
}
|
||||
e, err := bod.Execution()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution data from block")
|
||||
}
|
||||
exe, ok := e.(interfaces.ExecutionDataElectra)
|
||||
if !ok {
|
||||
return nil, errors.New("could not cast execution data to electra execution data")
|
||||
}
|
||||
st, err = electra.ProcessExecutionLayerWithdrawRequests(ctx, st, exe.WithdrawalRequests())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
|
||||
}
|
||||
st, err = electra.ProcessDepositReceipts(ctx, st, exe.DepositReceipts())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process deposit receipts")
|
||||
}
|
||||
|
||||
if err := electra.ProcessConsolidations(ctx, st, bod.Consolidations()); err != nil {
|
||||
return nil, errors.Wrap(err, "could not process consolidations")
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// This calls altair block operations.
|
||||
func altairOperations(
|
||||
ctx context.Context,
|
||||
|
||||
@@ -40,7 +40,6 @@ go_test(
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
|
||||
@@ -103,7 +103,7 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
|
||||
// [Modified in Electra:EIP7251]
|
||||
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance)
|
||||
var err error
|
||||
exitQueueEpoch, err = s.ExitEpochAndUpdateChurn(math.Gwei(validator.EffectiveBalance))
|
||||
exitQueueEpoch, err = s.ExitEpochAndUpdateChurn(primitives.Gwei(validator.EffectiveBalance))
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
@@ -146,7 +145,7 @@ func TestInitiateValidatorExit_ProperExit_Electra(t *testing.T) {
|
||||
// Pre-check: Exit balance to consume should be zero.
|
||||
ebtc, err := state.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, math.Gwei(0), ebtc)
|
||||
require.Equal(t, primitives.Gwei(0), ebtc)
|
||||
|
||||
newState, epoch, err := validators.InitiateValidatorExit(context.Background(), state, idx, 0, 0) // exitQueueEpoch and churn are not used in electra
|
||||
require.NoError(t, err)
|
||||
@@ -161,7 +160,7 @@ func TestInitiateValidatorExit_ProperExit_Electra(t *testing.T) {
|
||||
// Check that the exit balance to consume has been updated on the state.
|
||||
ebtc, err = state.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, math.Gwei(0), ebtc, "Exit balance to consume was not updated")
|
||||
require.NotEqual(t, primitives.Gwei(0), ebtc, "Exit balance to consume was not updated")
|
||||
}
|
||||
|
||||
func TestSlashValidator_OK(t *testing.T) {
|
||||
|
||||
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"availability.go",
|
||||
"availability_columns.go",
|
||||
"cache.go",
|
||||
"iface.go",
|
||||
"mock.go",
|
||||
@@ -20,6 +21,7 @@ go_library(
|
||||
"//runtime/logging:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
|
||||
151
beacon-chain/das/availability_columns.go
Normal file
@@ -0,0 +1,151 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
|
||||
// This implementation will hold any data columns passed to PersistColumns until IsDataAvailable is called for their
|
||||
// block, at which time they will undergo full verification and be saved to disk.
|
||||
type LazilyPersistentStoreColumn struct {
|
||||
store *filesystem.BlobStorage
|
||||
cache *cache
|
||||
verifier ColumnBatchVerifier
|
||||
nodeID enode.ID
|
||||
}
|
||||
|
||||
type ColumnBatchVerifier interface {
|
||||
VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, sc []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
|
||||
}
|
||||
|
||||
func NewLazilyPersistentStoreColumn(store *filesystem.BlobStorage, verifier ColumnBatchVerifier, id enode.ID) *LazilyPersistentStoreColumn {
|
||||
return &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
cache: newCache(),
|
||||
verifier: verifier,
|
||||
nodeID: id,
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Very Ugly, change interface to allow for columns and blobs
|
||||
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
|
||||
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
|
||||
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStoreColumn) PersistColumns(current primitives.Slot, sc ...blocks.RODataColumn) error {
|
||||
if len(sc) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(sc) > 1 {
|
||||
first := sc[0].BlockRoot()
|
||||
for i := 1; i < len(sc); i++ {
|
||||
if first != sc[i].BlockRoot() {
|
||||
return errMixedRoots
|
||||
}
|
||||
}
|
||||
}
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(sc[0].Slot()), slots.ToEpoch(current)) {
|
||||
return nil
|
||||
}
|
||||
key := keyFromColumn(sc[0])
|
||||
entry := s.cache.ensure(key)
|
||||
for i := range sc {
|
||||
if err := entry.stashColumns(&sc[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
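PersistColumns only caches a batch when every sidecar references the same block root. A minimal sketch of that guard in isolation, using plain 32-byte roots rather than the sidecar types above:

package main

import (
	"errors"
	"fmt"
)

// sameRoot mirrors the mixed-roots check in PersistColumns above: every sidecar
// in a batch must reference the same block root before it is cached.
func sameRoot(roots [][32]byte) error {
	for i := 1; i < len(roots); i++ {
		if roots[i] != roots[0] {
			return errors.New("sidecars do not all reference the same block root")
		}
	}
	return nil
}

func main() {
	a, b := [32]byte{1}, [32]byte{2}
	fmt.Println(sameRoot([][32]byte{a, a})) // <nil>
	fmt.Println(sameRoot([][32]byte{a, b})) // error
}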
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
|
||||
// DataColumnSidecars already in the db are assumed to have been previously verified against the block.
|
||||
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
blockCommitments, err := fullCommitmentsToCheck(b, current)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could check data availability for block %#x", b.Root())
|
||||
}
|
||||
// Return early for blocks that are pre-deneb or which do not have any commitments.
|
||||
if blockCommitments.count() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
key := keyFromBlock(b)
|
||||
entry := s.cache.ensure(key)
|
||||
defer s.cache.delete(key)
|
||||
root := b.Root()
|
||||
sumz, err := s.store.WaitForSummarizer(ctx)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", b.Root())).
|
||||
WithError(err).
|
||||
Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
|
||||
} else {
|
||||
entry.setDiskSummary(sumz.Summary(root))
|
||||
}
|
||||
|
||||
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
|
||||
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
|
||||
// ignore their response and decrease their peer score.
|
||||
sidecars, err := entry.filterColumns(root, blockCommitments)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "incomplete BlobSidecar batch")
|
||||
}
|
||||
// Do thorough verifications of each DataColumnSidecar for the block.
|
||||
// Same as above, we don't save DataColumnSidecars if there are any problems with the batch.
|
||||
vscs, err := s.verifier.VerifiedRODataColumns(ctx, b, sidecars)
|
||||
if err != nil {
|
||||
var me verification.VerificationMultiError
|
||||
ok := errors.As(err, &me)
|
||||
if ok {
|
||||
fails := me.Failures()
|
||||
lf := make(log.Fields, len(fails))
|
||||
for i := range fails {
|
||||
lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error()
|
||||
}
|
||||
log.WithFields(lf).
|
||||
Debug("invalid ColumnSidecars received")
|
||||
}
|
||||
return errors.Wrapf(err, "invalid ColumnSidecars received for block %#x", root)
|
||||
}
|
||||
// Ensure that each column sidecar is written to disk.
|
||||
for i := range vscs {
|
||||
if err := s.store.SaveDataColumn(vscs[i]); err != nil {
|
||||
return errors.Wrapf(err, "failed to save ColumnSidecar index %d for block %#x", vscs[i].ColumnIndex, root)
|
||||
}
|
||||
}
|
||||
// All ColumnSidecars are persisted - DA check succeeds.
|
||||
return nil
|
||||
}
|
||||
|
||||
func fullCommitmentsToCheck(b blocks.ROBlock, current primitives.Slot) (safeCommitmentsArray, error) {
|
||||
var ar safeCommitmentsArray
|
||||
if b.Version() < version.Deneb {
|
||||
return ar, nil
|
||||
}
|
||||
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
|
||||
return ar, nil
|
||||
}
|
||||
kc, err := b.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return ar, err
|
||||
}
|
||||
for i := range ar {
|
||||
ar[i] = kc
|
||||
}
|
||||
return ar, nil
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package das
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
@@ -38,6 +39,10 @@ func keyFromSidecar(sc blocks.ROBlob) cacheKey {
|
||||
return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
|
||||
}
|
||||
|
||||
func keyFromColumn(sc blocks.RODataColumn) cacheKey {
|
||||
return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
|
||||
}
|
||||
|
||||
// keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value.
|
||||
func keyFromBlock(b blocks.ROBlock) cacheKey {
|
||||
return cacheKey{slot: b.Block().Slot(), root: b.Root()}
|
||||
@@ -61,6 +66,7 @@ func (c *cache) delete(key cacheKey) {
|
||||
// cacheEntry holds a fixed-length cache of BlobSidecars.
|
||||
type cacheEntry struct {
|
||||
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
|
||||
colScs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
diskSummary filesystem.BlobStorageSummary
|
||||
}
|
||||
|
||||
@@ -82,6 +88,17 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *cacheEntry) stashColumns(sc *blocks.RODataColumn) error {
|
||||
if sc.ColumnIndex >= fieldparams.NumberOfColumns {
|
||||
return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.ColumnIndex)
|
||||
}
|
||||
if e.colScs[sc.ColumnIndex] != nil {
|
||||
return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.ColumnIndex, sc.KzgCommitments)
|
||||
}
|
||||
e.colScs[sc.ColumnIndex] = sc
|
||||
return nil
|
||||
}
|
||||
|
||||
// filter evicts sidecars that are not committed to by the block and returns custom
|
||||
// errors if the cache is missing any of the commitments, or if the commitments in
|
||||
// the cache do not match those found in the block. If err is nil, then all expected
|
||||
@@ -117,6 +134,35 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
|
||||
return scs, nil
|
||||
}
|
||||
|
||||
func (e *cacheEntry) filterColumns(root [32]byte, kc safeCommitmentsArray) ([]blocks.RODataColumn, error) {
|
||||
if e.diskSummary.AllAvailable(kc.count()) {
|
||||
return nil, nil
|
||||
}
|
||||
scs := make([]blocks.RODataColumn, 0, kc.count())
|
||||
for i := uint64(0); i < fieldparams.NumberOfColumns; i++ {
|
||||
// We already have this column, we don't need to write it or validate it.
|
||||
if e.diskSummary.HasIndex(i) {
|
||||
continue
|
||||
}
|
||||
if kc[i] == nil {
|
||||
if e.colScs[i] != nil {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if e.colScs[i] == nil {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
|
||||
}
|
||||
if !reflect.DeepEqual(kc[i], e.colScs[i].KzgCommitments) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.colScs[i].KzgCommitments, kc[i])
|
||||
}
|
||||
scs = append(scs, *e.colScs[i])
|
||||
}
|
||||
|
||||
return scs, nil
|
||||
}
|
||||
|
||||
// safeCommitmentArray is a fixed size array of commitment byte slices. This is helpful for avoiding
|
||||
// gratuitous bounds checks.
|
||||
type safeCommitmentArray [fieldparams.MaxBlobsPerBlock][]byte
|
||||
@@ -129,3 +175,14 @@ func (s safeCommitmentArray) count() int {
|
||||
}
|
||||
return fieldparams.MaxBlobsPerBlock
|
||||
}
|
||||
|
||||
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte
|
||||
|
||||
func (s safeCommitmentsArray) count() int {
|
||||
for i := range s {
|
||||
if s[i] == nil {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return fieldparams.NumberOfColumns
|
||||
}
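count() treats the first nil entry as the end of the populated prefix, so a fully populated array reports the full column count. A tiny sketch of that semantics on plain slices:

package main

import "fmt"

// countUntilNil mirrors safeCommitmentsArray.count above: it reports the index
// of the first nil entry, or the full length when every entry is populated.
func countUntilNil(s [][]string) int {
	for i := range s {
		if s[i] == nil {
			return i
		}
	}
	return len(s)
}

func main() {
	fmt.Println(countUntilNil([][]string{{"c"}, {"c"}, nil, {"c"}})) // 2
	fmt.Println(countUntilNil([][]string{{"c"}, {"c"}, {"c"}}))      // 3
}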
|
||||
|
||||
@@ -13,6 +13,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/async/event"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
@@ -39,8 +40,15 @@ const (
|
||||
directoryPermissions = 0700
|
||||
)
|
||||
|
||||
// BlobStorageOption is a functional option for configuring a BlobStorage.
|
||||
type BlobStorageOption func(*BlobStorage) error
|
||||
type (
|
||||
// BlobStorageOption is a functional option for configuring a BlobStorage.
|
||||
BlobStorageOption func(*BlobStorage) error
|
||||
|
||||
RootIndexPair struct {
|
||||
Root [fieldparams.RootLength]byte
|
||||
Index uint64
|
||||
}
|
||||
)
|
||||
|
||||
// WithBasePath is a required option that sets the base path of blob storage.
|
||||
func WithBasePath(base string) BlobStorageOption {
|
||||
@@ -70,7 +78,10 @@ func WithSaveFsync(fsync bool) BlobStorageOption {
|
||||
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
|
||||
// initialized once per beacon node.
|
||||
func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
|
||||
b := &BlobStorage{}
|
||||
b := &BlobStorage{
|
||||
DataColumnFeed: new(event.Feed),
|
||||
}
|
||||
|
||||
for _, o := range opts {
|
||||
if err := o(b); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create blob storage")
|
||||
@@ -99,6 +110,7 @@ type BlobStorage struct {
|
||||
fsync bool
|
||||
fs afero.Fs
|
||||
pruner *blobPruner
|
||||
DataColumnFeed *event.Feed
|
||||
}
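The DataColumnFeed field added above is a go-ethereum event.Feed, so consumers subscribe with a typed channel and receive the RootIndexPair sent from SaveDataColumn. A minimal sketch of that publish/subscribe flow, redeclaring RootIndexPair locally so the snippet stands alone:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

// RootIndexPair mirrors the notification value sent on DataColumnFeed above.
type RootIndexPair struct {
	Root  [32]byte
	Index uint64
}

func main() {
	var feed event.Feed

	// A consumer subscribes with a typed channel; Send delivers to every subscriber.
	ch := make(chan RootIndexPair, 1)
	sub := feed.Subscribe(ch)
	defer sub.Unsubscribe()

	feed.Send(RootIndexPair{Root: [32]byte{0xaa}, Index: 7})
	got := <-ch
	fmt.Printf("column %d for root %x saved\n", got.Index, got.Root[:4])
}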
|
||||
|
||||
// WarmCache runs the prune routine with an expiration slot of 0, so nothing will be pruned, but the pruner's cache
|
||||
@@ -221,6 +233,110 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SaveDataColumn saves a data column to our local filesystem.
|
||||
func (bs *BlobStorage) SaveDataColumn(column blocks.VerifiedRODataColumn) error {
|
||||
startTime := time.Now()
|
||||
fname := namerForDataColumn(column)
|
||||
sszPath := fname.path()
|
||||
exists, err := afero.Exists(bs.fs, sszPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exists {
|
||||
log.Trace("Ignoring a duplicate data column sidecar save attempt")
|
||||
return nil
|
||||
}
|
||||
|
||||
if bs.pruner != nil {
|
||||
hRoot, err := column.SignedBlockHeader.Header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := bs.pruner.notify(hRoot, column.SignedBlockHeader.Header.Slot, column.ColumnIndex); err != nil {
|
||||
return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", hRoot)
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize the ethpb.DataColumnSidecar to binary data using SSZ.
|
||||
sidecarData, err := column.MarshalSSZ()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to serialize sidecar data")
|
||||
} else if len(sidecarData) == 0 {
|
||||
return errSidecarEmptySSZData
|
||||
}
|
||||
|
||||
if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
|
||||
return err
|
||||
}
|
||||
partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))
|
||||
|
||||
partialMoved := false
|
||||
// Ensure the partial file is deleted.
|
||||
defer func() {
|
||||
if partialMoved {
|
||||
return
|
||||
}
|
||||
// Remove is expected to fail here when the save succeeded, because the partial file has already been renamed.
|
||||
err = bs.fs.Remove(partPath)
|
||||
if err == nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"partPath": partPath,
|
||||
}).Debugf("Removed partial file")
|
||||
}
|
||||
}()
|
||||
|
||||
// Create a partial file and write the serialized data to it.
|
||||
partialFile, err := bs.fs.Create(partPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create partial file")
|
||||
}
|
||||
|
||||
n, err := partialFile.Write(sidecarData)
|
||||
if err != nil {
|
||||
closeErr := partialFile.Close()
|
||||
if closeErr != nil {
|
||||
return closeErr
|
||||
}
|
||||
return errors.Wrap(err, "failed to write to partial file")
|
||||
}
|
||||
if bs.fsync {
|
||||
if err := partialFile.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := partialFile.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if n != len(sidecarData) {
|
||||
return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
return errEmptyBlobWritten
|
||||
}
|
||||
|
||||
// Atomically rename the partial file to its final name.
|
||||
err = bs.fs.Rename(partPath, sszPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to rename partial file to final name")
|
||||
}
|
||||
partialMoved = true
|
||||
|
||||
// Notify the data column notifier that a new data column has been saved.
|
||||
bs.DataColumnFeed.Send(RootIndexPair{
|
||||
Root: column.BlockRoot(),
|
||||
Index: column.ColumnIndex,
|
||||
})
|
||||
|
||||
// TODO: Use new metrics for data columns
|
||||
blobsWrittenCounter.Inc()
|
||||
blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
return nil
|
||||
}
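SaveDataColumn uses a write-to-partial-file-then-rename pattern so readers never observe a torn sidecar. A minimal sketch of the same pattern with the standard library only, using a hypothetical atomicWrite helper rather than the afero-backed code above:

package main

import (
	"log"
	"os"
	"path/filepath"
)

// atomicWrite sketches the save pattern used above: write to a partial file,
// optionally fsync, then rename so readers only ever observe complete files.
func atomicWrite(path string, data []byte, fsync bool) error {
	part := path + ".part"
	f, err := os.Create(part)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close()
		os.Remove(part)
		return err
	}
	if fsync {
		if err := f.Sync(); err != nil {
			f.Close()
			os.Remove(part)
			return err
		}
	}
	if err := f.Close(); err != nil {
		os.Remove(part)
		return err
	}
	// Rename is atomic on POSIX filesystems, so the final path never holds a partial write.
	return os.Rename(part, path)
}

func main() {
	p := filepath.Join(os.TempDir(), "sidecar_0.ssz")
	if err := atomicWrite(p, []byte("ssz bytes"), true); err != nil {
		log.Fatal(err)
	}
}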
|
||||
|
||||
// Get retrieves a single BlobSidecar by its root and index.
|
||||
// Since BlobStorage only writes blobs that have undergone full verification, the return
|
||||
// value is always a VerifiedROBlob.
|
||||
@@ -246,6 +362,20 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
|
||||
return verification.BlobSidecarNoop(ro)
|
||||
}
|
||||
|
||||
// GetColumn retrieves a single DataColumnSidecar by its root and index.
|
||||
func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (*ethpb.DataColumnSidecar, error) {
|
||||
expected := blobNamer{root: root, index: idx}
|
||||
encoded, err := afero.ReadFile(bs.fs, expected.path())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := ðpb.DataColumnSidecar{}
|
||||
if err := s.UnmarshalSSZ(encoded); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Remove removes all blobs for a given root.
|
||||
func (bs *BlobStorage) Remove(root [32]byte) error {
|
||||
rootDir := blobNamer{root: root}.dir()
|
||||
@@ -289,6 +419,61 @@ func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]boo
|
||||
return mask, nil
|
||||
}
|
||||
|
||||
// ColumnIndices retrieves the stored column indexes from our filesystem.
|
||||
func (bs *BlobStorage) ColumnIndices(root [32]byte) (map[uint64]bool, error) {
|
||||
custody := make(map[uint64]bool, fieldparams.NumberOfColumns)
|
||||
|
||||
// Get all the files in the directory.
|
||||
rootDir := blobNamer{root: root}.dir()
|
||||
entries, err := afero.ReadDir(bs.fs, rootDir)
|
||||
if err != nil {
|
||||
// If the directory does not exist, we do not custody any columns.
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return nil, errors.Wrap(err, "read directory")
|
||||
}
|
||||
|
||||
// Iterate over all the entries in the directory.
|
||||
for _, entry := range entries {
|
||||
// If the entry is a directory, skip it.
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
// If the entry does not have the correct extension, skip it.
|
||||
name := entry.Name()
|
||||
if !strings.HasSuffix(name, sszExt) {
|
||||
continue
|
||||
}
|
||||
|
||||
// The file should be in the `<index>.<extension>` format.
|
||||
// Skip the file if it does not match the format.
|
||||
parts := strings.Split(name, ".")
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the column index from the file name.
|
||||
columnIndexStr := parts[0]
|
||||
columnIndex, err := strconv.ParseUint(columnIndexStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
|
||||
}
|
||||
|
||||
// If the column index is out of bounds, return an error.
|
||||
if columnIndex >= fieldparams.NumberOfColumns {
|
||||
return nil, errors.Wrapf(errIndexOutOfBounds, "invalid index %d", columnIndex)
|
||||
}
|
||||
|
||||
// Mark the column index as in custody.
|
||||
custody[columnIndex] = true
|
||||
}
|
||||
|
||||
return custody, nil
|
||||
}
|
||||
|
||||
// Clear deletes all files on the filesystem.
|
||||
func (bs *BlobStorage) Clear() error {
|
||||
dirs, err := listDir(bs.fs, ".")
|
||||
@@ -321,6 +506,10 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
|
||||
return blobNamer{root: sc.BlockRoot(), index: sc.Index}
|
||||
}
|
||||
|
||||
func namerForDataColumn(col blocks.VerifiedRODataColumn) blobNamer {
|
||||
return blobNamer{root: col.BlockRoot(), index: col.ColumnIndex}
|
||||
}
|
||||
|
||||
func (p blobNamer) dir() string {
|
||||
return rootString(p.root)
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
|
||||
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool
|
||||
type blobIndexMask [fieldparams.NumberOfColumns]bool
|
||||
|
||||
// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
|
||||
type BlobStorageSummary struct {
|
||||
@@ -68,9 +68,12 @@ func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
|
||||
}
|
||||
|
||||
func (s *blobStorageCache) ensure(key [32]byte, slot primitives.Slot, idx uint64) error {
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return errIndexOutOfBounds
|
||||
}
|
||||
// TODO: Separate blob index checks from data column index checks
|
||||
/*
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return errIndexOutOfBounds
|
||||
}
|
||||
*/
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
v := s.cache[key]
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func TestSlotByRoot_Summary(t *testing.T) {
|
||||
t.Skip("Use new test for data columns")
|
||||
var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
|
||||
firstSet[0] = true
|
||||
lastSet[len(lastSet)-1] = true
|
||||
|
||||
@@ -23,10 +23,10 @@ import (
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// used to represent errors for inconsistent slot ranges.
|
||||
// Used to represent errors for inconsistent slot ranges.
|
||||
var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")
|
||||
|
||||
// Block retrieval by root.
|
||||
// Block retrieval by root. Return nil if block is not found.
|
||||
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
|
||||
defer span.End()
|
||||
|
||||
@@ -3,7 +3,6 @@ package kv
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"testing"
|
||||
@@ -100,7 +99,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
|
||||
BlockHash: make([]byte, 32),
|
||||
TransactionsRoot: make([]byte, 32),
|
||||
WithdrawalsRoot: make([]byte, 32),
|
||||
}, big.NewInt(0))
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(p))
|
||||
return st
|
||||
@@ -125,7 +124,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
|
||||
BlockHash: make([]byte, 32),
|
||||
TransactionsRoot: make([]byte, 32),
|
||||
WithdrawalsRoot: make([]byte, 32),
|
||||
}, big.NewInt(0))
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(p))
|
||||
return st
|
||||
@@ -152,7 +151,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
|
||||
WithdrawalsRoot: make([]byte, 32),
|
||||
DepositReceiptsRoot: make([]byte, 32),
|
||||
WithdrawalRequestsRoot: make([]byte, 32),
|
||||
}, big.NewInt(0))
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(p))
|
||||
return st
|
||||
|
||||
@@ -12,6 +12,7 @@ go_library(
|
||||
"log_processing.go",
|
||||
"metrics.go",
|
||||
"options.go",
|
||||
"payload_body.go",
|
||||
"prometheus.go",
|
||||
"rpc_connection.go",
|
||||
"service.go",
|
||||
@@ -86,6 +87,8 @@ go_test(
|
||||
"execution_chain_test.go",
|
||||
"init_test.go",
|
||||
"log_processing_test.go",
|
||||
"mock_test.go",
|
||||
"payload_body_test.go",
|
||||
"prometheus_test.go",
|
||||
"service_test.go",
|
||||
],
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package execution
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
@@ -34,12 +33,19 @@ var (
|
||||
supportedEngineEndpoints = []string{
|
||||
NewPayloadMethod,
|
||||
NewPayloadMethodV2,
|
||||
NewPayloadMethodV3,
|
||||
NewPayloadMethodV4,
|
||||
ForkchoiceUpdatedMethod,
|
||||
ForkchoiceUpdatedMethodV2,
|
||||
ForkchoiceUpdatedMethodV3,
|
||||
GetPayloadMethod,
|
||||
GetPayloadMethodV2,
|
||||
GetPayloadMethodV3,
|
||||
GetPayloadMethodV4,
|
||||
GetPayloadBodiesByHashV1,
|
||||
GetPayloadBodiesByRangeV1,
|
||||
GetPayloadBodiesByHashV2,
|
||||
GetPayloadBodiesByRangeV2,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -69,16 +75,22 @@ const (
|
||||
BlockByHashMethod = "eth_getBlockByHash"
|
||||
// BlockByNumberMethod request string for JSON-RPC.
|
||||
BlockByNumberMethod = "eth_getBlockByNumber"
|
||||
// GetPayloadBodiesByHashV1 v1 request string for JSON-RPC.
|
||||
// GetPayloadBodiesByHashV1 is the engine_getPayloadBodiesByHashX JSON-RPC method for pre-Electra payloads.
|
||||
GetPayloadBodiesByHashV1 = "engine_getPayloadBodiesByHashV1"
|
||||
// GetPayloadBodiesByRangeV1 v1 request string for JSON-RPC.
|
||||
// GetPayloadBodiesByHashV2 is the engine_getPayloadBodiesByHashX JSON-RPC method introduced by Electra.
|
||||
GetPayloadBodiesByHashV2 = "engine_getPayloadBodiesByHashV2"
|
||||
// GetPayloadBodiesByRangeV1 is the engine_getPayloadBodiesByRangeX JSON-RPC method for pre-Electra payloads.
|
||||
GetPayloadBodiesByRangeV1 = "engine_getPayloadBodiesByRangeV1"
|
||||
// GetPayloadBodiesByRangeV2 is the engine_getPayloadBodiesByRangeX JSON-RPC method introduced by Electra.
|
||||
GetPayloadBodiesByRangeV2 = "engine_getPayloadBodiesByRangeV2"
|
||||
// ExchangeCapabilities request string for JSON-RPC.
|
||||
ExchangeCapabilities = "engine_exchangeCapabilities"
|
||||
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
|
||||
defaultEngineTimeout = time.Second
|
||||
)
|
||||
|
||||
var errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
|
||||
|
||||
// ForkchoiceUpdatedResponse is the response kind received by the
|
||||
// engine_forkchoiceUpdatedV1 endpoint.
|
||||
type ForkchoiceUpdatedResponse struct {
|
||||
@@ -106,7 +118,7 @@ type EngineCaller interface {
|
||||
ForkchoiceUpdated(
|
||||
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
|
||||
) (*pb.PayloadIDBytes, []byte, error)
|
||||
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error)
|
||||
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (*blocks.GetPayloadResponse, error)
|
||||
ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
|
||||
GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
|
||||
}
|
||||
@@ -254,69 +266,43 @@ func (s *Service) ForkchoiceUpdated(
|
||||
}
|
||||
}
|
||||
|
||||
func getPayloadMethodAndMessage(slot primitives.Slot) (string, proto.Message) {
|
||||
pe := slots.ToEpoch(slot)
|
||||
if pe >= params.BeaconConfig().ElectraForkEpoch {
|
||||
return GetPayloadMethodV4, &pb.ExecutionPayloadElectraWithValueAndBlobsBundle{}
|
||||
}
|
||||
if pe >= params.BeaconConfig().DenebForkEpoch {
|
||||
return GetPayloadMethodV3, &pb.ExecutionPayloadDenebWithValueAndBlobsBundle{}
|
||||
}
|
||||
if pe >= params.BeaconConfig().CapellaForkEpoch {
|
||||
return GetPayloadMethodV2, &pb.ExecutionPayloadCapellaWithValue{}
|
||||
}
|
||||
return GetPayloadMethod, &pb.ExecutionPayload{}
|
||||
}
|
||||
|
||||
// GetPayload calls the engine_getPayloadVX method via JSON-RPC.
|
||||
// It returns the execution data as well as the blobs bundle.
|
||||
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error) {
|
||||
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (*blocks.GetPayloadResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayload")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
getPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
d := time.Now().Add(defaultEngineTimeout)
|
||||
ctx, cancel := context.WithDeadline(ctx, d)
|
||||
defer cancel()
|
||||
|
||||
if slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch {
|
||||
result := &pb.ExecutionPayloadElectraWithValueAndBlobsBundle{}
|
||||
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV4, pb.PayloadIDBytes(payloadId))
|
||||
if err != nil {
|
||||
return nil, nil, false, handleRPCError(err)
|
||||
}
|
||||
ed, err := blocks.WrappedExecutionPayloadElectra(result.Payload, blocks.PayloadValueToWei(result.Value))
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
return ed, result.BlobsBundle, result.ShouldOverrideBuilder, nil
|
||||
}
|
||||
|
||||
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
|
||||
result := &pb.ExecutionPayloadDenebWithValueAndBlobsBundle{}
|
||||
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV3, pb.PayloadIDBytes(payloadId))
|
||||
if err != nil {
|
||||
return nil, nil, false, handleRPCError(err)
|
||||
}
|
||||
ed, err := blocks.WrappedExecutionPayloadDeneb(result.Payload, blocks.PayloadValueToWei(result.Value))
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
return ed, result.BlobsBundle, result.ShouldOverrideBuilder, nil
|
||||
}
|
||||
|
||||
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
|
||||
result := &pb.ExecutionPayloadCapellaWithValue{}
|
||||
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV2, pb.PayloadIDBytes(payloadId))
|
||||
if err != nil {
|
||||
return nil, nil, false, handleRPCError(err)
|
||||
}
|
||||
ed, err := blocks.WrappedExecutionPayloadCapella(result.Payload, blocks.PayloadValueToWei(result.Value))
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
return ed, nil, false, nil
|
||||
}
|
||||
|
||||
result := &pb.ExecutionPayload{}
|
||||
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethod, pb.PayloadIDBytes(payloadId))
|
||||
method, result := getPayloadMethodAndMessage(slot)
|
||||
err := s.rpcClient.CallContext(ctx, result, method, pb.PayloadIDBytes(payloadId))
|
||||
if err != nil {
|
||||
return nil, nil, false, handleRPCError(err)
|
||||
return nil, handleRPCError(err)
|
||||
}
|
||||
ed, err := blocks.WrappedExecutionPayload(result)
|
||||
res, err := blocks.NewGetPayloadResponse(result)
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
return ed, nil, false, nil
|
||||
return res, nil
|
||||
}
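The refactored GetPayload delegates fork-specific result types to getPayloadMethodAndMessage and issues a single deadline-bounded rpcClient.CallContext call. A minimal sketch of that call pattern with go-ethereum's rpc client; the endpoint and arguments are placeholders and JWT auth is omitted:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Bound the request with a deadline, mirroring defaultEngineTimeout above.
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second))
	defer cancel()

	// Placeholder endpoint; a real engine API connection also requires JWT auth.
	client, err := rpc.DialContext(ctx, "http://localhost:8551")
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer client.Close()

	// engine_exchangeCapabilities takes the caller's supported methods and returns
	// the server's; the typed result is decoded by the RPC client.
	var remote []string
	err = client.CallContext(ctx, &remote, "engine_exchangeCapabilities", []string{"engine_getPayloadV4"})
	fmt.Println(remote, err)
}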
|
||||
|
||||
func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
|
||||
@@ -509,93 +495,19 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
|
||||
return hdr, err
|
||||
}
|
||||
|
||||
// GetPayloadBodiesByHash returns the relevant payload bodies for the provided block hash.
|
||||
func (s *Service) GetPayloadBodiesByHash(ctx context.Context, executionBlockHashes []common.Hash) ([]*pb.ExecutionPayloadBodyV1, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayloadBodiesByHashV1")
|
||||
defer span.End()
|
||||
|
||||
result := make([]*pb.ExecutionPayloadBodyV1, 0)
|
||||
// Exit early if there are no execution hashes.
|
||||
if len(executionBlockHashes) == 0 {
|
||||
return result, nil
|
||||
}
|
||||
err := s.rpcClient.CallContext(ctx, &result, GetPayloadBodiesByHashV1, executionBlockHashes)
|
||||
if err != nil {
|
||||
return nil, handleRPCError(err)
|
||||
}
|
||||
if len(result) != len(executionBlockHashes) {
|
||||
return nil, fmt.Errorf("mismatch of payloads retrieved from the execution client: %d vs %d", len(result), len(executionBlockHashes))
|
||||
}
|
||||
for i, item := range result {
|
||||
if item == nil {
|
||||
result[i] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: make([][]byte, 0),
|
||||
Withdrawals: make([]*pb.Withdrawal, 0),
|
||||
}
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetPayloadBodiesByRange returns the relevant payload bodies for the provided range.
|
||||
func (s *Service) GetPayloadBodiesByRange(ctx context.Context, start, count uint64) ([]*pb.ExecutionPayloadBodyV1, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayloadBodiesByRangeV1")
|
||||
defer span.End()
|
||||
|
||||
result := make([]*pb.ExecutionPayloadBodyV1, 0)
|
||||
err := s.rpcClient.CallContext(ctx, &result, GetPayloadBodiesByRangeV1, start, count)
|
||||
|
||||
for i, item := range result {
|
||||
if item == nil {
|
||||
result[i] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: make([][]byte, 0),
|
||||
Withdrawals: make([]*pb.Withdrawal, 0),
|
||||
}
|
||||
}
|
||||
}
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
// ReconstructFullBlock takes in a blinded beacon block and reconstructs
|
||||
// a beacon block with a full execution payload via the engine API.
|
||||
func (s *Service) ReconstructFullBlock(
|
||||
ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
) (interfaces.SignedBeaconBlock, error) {
|
||||
if err := blocks.BeaconBlockIsNil(blindedBlock); err != nil {
|
||||
return nil, errors.Wrap(err, "cannot reconstruct bellatrix block from nil data")
|
||||
}
|
||||
if !blindedBlock.Block().IsBlinded() {
|
||||
return nil, errors.New("can only reconstruct block from blinded block format")
|
||||
}
|
||||
header, err := blindedBlock.Block().Body().Execution()
|
||||
reconstructed, err := s.ReconstructFullBellatrixBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{blindedBlock})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if header.IsNil() {
|
||||
return nil, errors.New("execution payload header in blinded block was nil")
|
||||
if len(reconstructed) != 1 {
|
||||
return nil, errors.Errorf("could not retrieve the correct number of payload bodies: wanted 1 but got %d", len(reconstructed))
|
||||
}
|
||||
|
||||
// If the payload header has a block hash of 0x0, it means we are pre-merge and should
|
||||
// simply return the block with an empty execution payload.
|
||||
if bytes.Equal(header.BlockHash(), params.BeaconConfig().ZeroHash[:]) {
|
||||
payload, err := buildEmptyExecutionPayload(blindedBlock.Version())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlock, payload)
|
||||
}
|
||||
|
||||
executionBlockHash := common.BytesToHash(header.BlockHash())
|
||||
payload, err := s.retrievePayloadFromExecutionHash(ctx, executionBlockHash, header, blindedBlock.Version())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlock, payload.Proto())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reconstructedExecutionPayloadCount.Add(1)
|
||||
return fullBlock, nil
|
||||
return reconstructed[0], nil
|
||||
}
|
||||
|
||||
// ReconstructFullBellatrixBlockBatch takes in a batch of blinded beacon blocks and reconstructs
|
||||
@@ -603,114 +515,16 @@ func (s *Service) ReconstructFullBlock(
|
||||
func (s *Service) ReconstructFullBellatrixBlockBatch(
|
||||
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
|
||||
) ([]interfaces.SignedBeaconBlock, error) {
|
||||
if len(blindedBlocks) == 0 {
|
||||
return []interfaces.SignedBeaconBlock{}, nil
|
||||
}
|
||||
var executionHashes []common.Hash
|
||||
var validExecPayloads []int
|
||||
var zeroExecPayloads []int
|
||||
for i, b := range blindedBlocks {
|
||||
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||
return nil, errors.Wrap(err, "cannot reconstruct bellatrix block from nil data")
|
||||
}
|
||||
if !b.Block().IsBlinded() {
|
||||
return nil, errors.New("can only reconstruct block from blinded block format")
|
||||
}
|
||||
header, err := b.Block().Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if header.IsNil() {
|
||||
return nil, errors.New("execution payload header in blinded block was nil")
|
||||
}
|
||||
// Determine if the block is pre-merge or post-merge. Depending on the result,
|
||||
// we will ask the execution engine for the full payload.
|
||||
if bytes.Equal(header.BlockHash(), params.BeaconConfig().ZeroHash[:]) {
|
||||
zeroExecPayloads = append(zeroExecPayloads, i)
|
||||
} else {
|
||||
executionBlockHash := common.BytesToHash(header.BlockHash())
|
||||
validExecPayloads = append(validExecPayloads, i)
|
||||
executionHashes = append(executionHashes, executionBlockHash)
|
||||
}
|
||||
}
|
||||
fullBlocks, err := s.retrievePayloadsFromExecutionHashes(ctx, executionHashes, validExecPayloads, blindedBlocks)
|
||||
unb, err := reconstructBlindedBlockBatch(ctx, s.rpcClient, blindedBlocks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// For blocks that are pre-merge we simply reconstruct them via an empty
|
||||
// execution payload.
|
||||
for _, realIdx := range zeroExecPayloads {
|
||||
bblock := blindedBlocks[realIdx]
|
||||
payload, err := buildEmptyExecutionPayload(bblock.Version())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlocks[realIdx], payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fullBlocks[realIdx] = fullBlock
|
||||
}
|
||||
reconstructedExecutionPayloadCount.Add(float64(len(blindedBlocks)))
|
||||
return fullBlocks, nil
|
||||
}
|
||||
|
||||
func (s *Service) retrievePayloadFromExecutionHash(ctx context.Context, executionBlockHash common.Hash, header interfaces.ExecutionData, version int) (interfaces.ExecutionData, error) {
|
||||
pBodies, err := s.GetPayloadBodiesByHash(ctx, []common.Hash{executionBlockHash})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get payload body by hash %#x: %v", executionBlockHash, err)
|
||||
}
|
||||
if len(pBodies) != 1 {
|
||||
return nil, errors.Errorf("could not retrieve the correct number of payload bodies: wanted 1 but got %d", len(pBodies))
|
||||
}
|
||||
bdy := pBodies[0]
|
||||
return fullPayloadFromPayloadBody(header, bdy, version)
|
||||
}
|
||||
|
||||
// This method assumes that the provided execution hashes are all valid and part of the
|
||||
// canonical chain.
|
||||
func (s *Service) retrievePayloadsFromExecutionHashes(
|
||||
ctx context.Context,
|
||||
executionHashes []common.Hash,
|
||||
validExecPayloads []int,
|
||||
blindedBlocks []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) {
|
||||
fullBlocks := make([]interfaces.SignedBeaconBlock, len(blindedBlocks))
|
||||
var payloadBodies []*pb.ExecutionPayloadBodyV1
|
||||
var err error
|
||||
|
||||
payloadBodies, err = s.GetPayloadBodiesByHash(ctx, executionHashes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch payload bodies by hash %#x: %v", executionHashes, err)
|
||||
}
|
||||
|
||||
// For each valid payload, we reconstruct the full block from it with the
|
||||
// blinded block.
|
||||
for sliceIdx, realIdx := range validExecPayloads {
|
||||
var payload interfaces.ExecutionData
|
||||
bblock := blindedBlocks[realIdx]
|
||||
b := payloadBodies[sliceIdx]
|
||||
if b == nil {
|
||||
return nil, fmt.Errorf("received nil payload body for request by hash %#x", executionHashes[sliceIdx])
|
||||
}
|
||||
header, err := bblock.Block().Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload, err = fullPayloadFromPayloadBody(header, b, bblock.Version())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(bblock, payload.Proto())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fullBlocks[realIdx] = fullBlock
|
||||
}
|
||||
return fullBlocks, nil
|
||||
reconstructedExecutionPayloadCount.Add(float64(len(unb)))
|
||||
return unb, nil
|
||||
}
|
||||
|
||||
 func fullPayloadFromPayloadBody(
-	header interfaces.ExecutionData, body *pb.ExecutionPayloadBodyV1, bVersion int,
+	header interfaces.ExecutionData, body *pb.ExecutionPayloadBody, bVersion int,
 ) (interfaces.ExecutionData, error) {
 	if header.IsNil() || body == nil {
 		return nil, errors.New("execution block and header cannot be nil")
@@ -732,7 +546,7 @@ func fullPayloadFromPayloadBody(
 			ExtraData:     header.ExtraData(),
 			BaseFeePerGas: header.BaseFeePerGas(),
 			BlockHash:     header.BlockHash(),
-			Transactions:  body.Transactions,
+			Transactions:  pb.RecastHexutilByteSlice(body.Transactions),
 		})
 	case version.Capella:
 		return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
@@ -749,9 +563,9 @@ func fullPayloadFromPayloadBody(
 			ExtraData:     header.ExtraData(),
 			BaseFeePerGas: header.BaseFeePerGas(),
 			BlockHash:     header.BlockHash(),
-			Transactions:  body.Transactions,
+			Transactions:  pb.RecastHexutilByteSlice(body.Transactions),
 			Withdrawals:   body.Withdrawals,
-		}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
+		}) // We can't get the block value and don't care about the block value for this instance
 	case version.Deneb:
 		ebg, err := header.ExcessBlobGas()
 		if err != nil {
@@ -776,11 +590,11 @@ func fullPayloadFromPayloadBody(
 			ExtraData:     header.ExtraData(),
 			BaseFeePerGas: header.BaseFeePerGas(),
 			BlockHash:     header.BlockHash(),
-			Transactions:  body.Transactions,
+			Transactions:  pb.RecastHexutilByteSlice(body.Transactions),
 			Withdrawals:   body.Withdrawals,
 			ExcessBlobGas: ebg,
 			BlobGasUsed:   bgu,
-		}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
+		}) // We can't get the block value and don't care about the block value for this instance
 	case version.Electra:
 		ebg, err := header.ExcessBlobGas()
 		if err != nil {
@@ -790,26 +604,36 @@ func fullPayloadFromPayloadBody(
 		if err != nil {
 			return nil, errors.Wrap(err, "unable to extract BlobGasUsed attribute from execution payload header")
 		}
+		wr, err := pb.JsonWithdrawalRequestsToProto(body.WithdrawalRequests)
+		if err != nil {
+			return nil, err
+		}
+		dr, err := pb.JsonDepositRequestsToProto(body.DepositRequests)
+		if err != nil {
+			return nil, err
+		}
 		return blocks.WrappedExecutionPayloadElectra(
 			&pb.ExecutionPayloadElectra{
-				ParentHash:    header.ParentHash(),
-				FeeRecipient:  header.FeeRecipient(),
-				StateRoot:     header.StateRoot(),
-				ReceiptsRoot:  header.ReceiptsRoot(),
-				LogsBloom:     header.LogsBloom(),
-				PrevRandao:    header.PrevRandao(),
-				BlockNumber:   header.BlockNumber(),
-				GasLimit:      header.GasLimit(),
-				GasUsed:       header.GasUsed(),
-				Timestamp:     header.Timestamp(),
-				ExtraData:     header.ExtraData(),
-				BaseFeePerGas: header.BaseFeePerGas(),
-				BlockHash:     header.BlockHash(),
-				Transactions:  body.Transactions,
-				Withdrawals:   body.Withdrawals,
-				ExcessBlobGas: ebg,
-				BlobGasUsed:   bgu,
-			}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
+				ParentHash:         header.ParentHash(),
+				FeeRecipient:       header.FeeRecipient(),
+				StateRoot:          header.StateRoot(),
+				ReceiptsRoot:       header.ReceiptsRoot(),
+				LogsBloom:          header.LogsBloom(),
+				PrevRandao:         header.PrevRandao(),
+				BlockNumber:        header.BlockNumber(),
+				GasLimit:           header.GasLimit(),
+				GasUsed:            header.GasUsed(),
+				Timestamp:          header.Timestamp(),
+				ExtraData:          header.ExtraData(),
+				BaseFeePerGas:      header.BaseFeePerGas(),
+				BlockHash:          header.BlockHash(),
+				Transactions:       pb.RecastHexutilByteSlice(body.Transactions),
+				Withdrawals:        body.Withdrawals,
+				ExcessBlobGas:      ebg,
+				BlobGasUsed:        bgu,
+				DepositReceipts:    dr,
+				WithdrawalRequests: wr,
+			}) // We can't get the block value and don't care about the block value for this instance
 	default:
 		return nil, fmt.Errorf("unknown execution block version for payload %d", bVersion)
 	}
@@ -943,6 +767,22 @@ func buildEmptyExecutionPayload(v int) (proto.Message, error) {
|
||||
Transactions: make([][]byte, 0),
|
||||
Withdrawals: make([]*pb.Withdrawal, 0),
|
||||
}, nil
|
||||
case version.Electra:
|
||||
return &pb.ExecutionPayloadElectra{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
Withdrawals: make([]*pb.Withdrawal, 0),
|
||||
WithdrawalRequests: make([]*pb.ExecutionLayerWithdrawalRequest, 0),
|
||||
DepositReceipts: make([]*pb.DepositReceipt, 0),
|
||||
}, nil
|
||||
default:
|
||||
return nil, errors.Wrapf(ErrUnsupportedVersion, "version=%s", version.String(v))
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large

beacon-chain/execution/mock_test.go (normal file, 221 lines added)
@@ -0,0 +1,221 @@
package execution
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
var mockHandlerDefaultName = "__default__"
|
||||
|
||||
type jsonError struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data interface{} `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
type jsonrpcMessage struct {
|
||||
Version string `json:"jsonrpc,omitempty"`
|
||||
ID json.RawMessage `json:"id,omitempty"`
|
||||
Method string `json:"method,omitempty"`
|
||||
Params json.RawMessage `json:"params,omitempty"`
|
||||
Error *jsonError `json:"error,omitempty"`
|
||||
Result json.RawMessage `json:"result,omitempty"`
|
||||
}
|
||||
|
||||
type mockHandler func(*jsonrpcMessage, http.ResponseWriter, *http.Request)
|
||||
|
||||
type mockEngine struct {
|
||||
t *testing.T
|
||||
handlers map[string]mockHandler
|
||||
calls map[string][]*jsonrpcMessage
|
||||
}
|
||||
|
||||
func newMockEngine(t *testing.T) (*rpc.Client, *mockEngine) {
|
||||
s := &mockEngine{t: t, handlers: make(map[string]mockHandler), calls: make(map[string][]*jsonrpcMessage)}
|
||||
srv := httptest.NewServer(s)
|
||||
c, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
return c, s
|
||||
}
|
||||
|
||||
func (s *mockEngine) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
msg := &jsonrpcMessage{}
|
||||
defer func() {
|
||||
s.calls[msg.Method] = append(s.calls[msg.Method], msg)
|
||||
}()
|
||||
if err := json.NewDecoder(r.Body).Decode(msg); err != nil {
|
||||
http.Error(w, "failed to decode request: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(s.t, r.Body.Close())
|
||||
}()
|
||||
handler, ok := s.handlers[msg.Method]
|
||||
if !ok {
|
||||
// Fallback to default handler if it is registered.
|
||||
handler, ok = s.handlers[mockHandlerDefaultName]
|
||||
if !ok {
|
||||
s.t.Fatalf("mockEngine called with unexpected method %s", msg.Method)
|
||||
}
|
||||
}
|
||||
handler(msg, w, r)
|
||||
}
|
||||
|
||||
func (s *mockEngine) register(method string, handler mockHandler) {
|
||||
s.handlers[method] = handler
|
||||
}
|
||||
|
||||
func (s *mockEngine) registerDefault(handler mockHandler) {
|
||||
s.handlers[mockHandlerDefaultName] = handler
|
||||
}
|
||||
|
||||
func (s *mockEngine) callCount(method string) int {
|
||||
return len(s.calls[method])
|
||||
}
|
||||
|
||||
func mockParseUintList(t *testing.T, data json.RawMessage) []uint64 {
|
||||
var list []uint64
|
||||
if err := json.Unmarshal(data, &list); err != nil {
|
||||
t.Fatalf("failed to parse uint list: %v", err)
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
func mockParseHexByteList(t *testing.T, data json.RawMessage) []hexutil.Bytes {
|
||||
var list [][]hexutil.Bytes
|
||||
if err := json.Unmarshal(data, &list); err != nil {
|
||||
t.Fatalf("failed to parse hex byte list: %v", err)
|
||||
}
|
||||
require.Equal(t, 1, len(list))
|
||||
return list[0]
|
||||
}
|
||||
|
||||
func strToHexBytes(t *testing.T, s string) hexutil.Bytes {
|
||||
b := hexutil.Bytes{}
|
||||
require.NoError(t, b.UnmarshalText([]byte(s)))
|
||||
return b
|
||||
}
|
||||
|
||||
func mockWriteResult(t *testing.T, w http.ResponseWriter, req *jsonrpcMessage, result any) {
|
||||
marshaled, err := json.Marshal(result)
|
||||
require.NoError(t, err)
|
||||
req.Result = marshaled
|
||||
require.NoError(t, json.NewEncoder(w).Encode(req))
|
||||
}
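
For orientation, a minimal sketch of how these mock helpers wire together; the test name and method string below are invented for illustration, and the pattern simply mirrors the real tests further down.

func TestMockEngineSketch(t *testing.T) {
	cli, srv := newMockEngine(t)
	// Respond to a single (hypothetical) engine method with a canned result.
	srv.register("engine_someMethod", func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
		mockWriteResult(t, w, msg, []string{"ok"})
	})
	var out []string
	require.NoError(t, cli.CallContext(context.Background(), &out, "engine_someMethod"))
	// Every request is recorded by ServeHTTP, so call counts can be asserted per method.
	require.Equal(t, 1, srv.callCount("engine_someMethod"))
}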
|
||||
|
||||
func TestParseRequest(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cases := []struct {
|
||||
method string
|
||||
uintArgs []uint64
|
||||
byteArgs []hexutil.Bytes
|
||||
}{
|
||||
{
|
||||
method: GetPayloadBodiesByHashV1,
|
||||
byteArgs: []hexutil.Bytes{
|
||||
strToHexBytes(t, "0x656d707479000000000000000000000000000000000000000000000000000000"),
|
||||
strToHexBytes(t, "0x66756c6c00000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
},
|
||||
{
|
||||
method: GetPayloadBodiesByHashV2,
|
||||
byteArgs: []hexutil.Bytes{
|
||||
strToHexBytes(t, "0x656d707479000000000000000000000000000000000000000000000000000000"),
|
||||
strToHexBytes(t, "0x66756c6c00000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
},
|
||||
{
|
||||
method: GetPayloadBodiesByRangeV1,
|
||||
uintArgs: []uint64{0, 1},
|
||||
},
|
||||
{
|
||||
method: GetPayloadBodiesByRangeV2,
|
||||
uintArgs: []uint64{math.MaxUint64, 1},
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.method, func(t *testing.T) {
|
||||
cli, srv := newMockEngine(t)
|
||||
srv.register(c.method, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
require.Equal(t, c.method, msg.Method)
|
||||
nr := uint64(len(c.byteArgs))
|
||||
if len(c.byteArgs) > 0 {
|
||||
require.DeepEqual(t, c.byteArgs, mockParseHexByteList(t, msg.Params))
|
||||
}
|
||||
if len(c.uintArgs) > 0 {
|
||||
rang := mockParseUintList(t, msg.Params)
|
||||
require.DeepEqual(t, c.uintArgs, rang)
|
||||
nr = rang[1]
|
||||
}
|
||||
mockWriteResult(t, w, msg, make([]*pb.ExecutionPayloadBody, nr))
|
||||
})
|
||||
|
||||
result := make([]*pb.ExecutionPayloadBody, 0)
|
||||
var args []interface{}
|
||||
if len(c.byteArgs) > 0 {
|
||||
args = []interface{}{c.byteArgs}
|
||||
}
|
||||
if len(c.uintArgs) > 0 {
|
||||
args = make([]interface{}, len(c.uintArgs))
|
||||
for i := range c.uintArgs {
|
||||
args[i] = c.uintArgs[i]
|
||||
}
|
||||
}
|
||||
require.NoError(t, cli.CallContext(ctx, &result, c.method, args...))
|
||||
if len(c.byteArgs) > 0 {
|
||||
require.Equal(t, len(c.byteArgs), len(result))
|
||||
}
|
||||
if len(c.uintArgs) > 0 {
|
||||
require.Equal(t, int(c.uintArgs[1]), len(result))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCallCount(t *testing.T) {
|
||||
methods := []string{
|
||||
GetPayloadBodiesByHashV1,
|
||||
GetPayloadBodiesByHashV2,
|
||||
GetPayloadBodiesByRangeV1,
|
||||
GetPayloadBodiesByRangeV2,
|
||||
}
|
||||
cases := []struct {
|
||||
method string
|
||||
count int
|
||||
}{
|
||||
{method: GetPayloadBodiesByHashV1, count: 1},
|
||||
{method: GetPayloadBodiesByHashV1, count: 2},
|
||||
{method: GetPayloadBodiesByHashV2, count: 1},
|
||||
{method: GetPayloadBodiesByRangeV1, count: 1},
|
||||
{method: GetPayloadBodiesByRangeV1, count: 2},
|
||||
{method: GetPayloadBodiesByRangeV2, count: 1},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.method, func(t *testing.T) {
|
||||
cli, srv := newMockEngine(t)
|
||||
srv.register(c.method, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
mockWriteResult(t, w, msg, nil)
|
||||
})
|
||||
for i := 0; i < c.count; i++ {
|
||||
require.NoError(t, cli.CallContext(context.Background(), nil, c.method))
|
||||
}
|
||||
for _, m := range methods {
|
||||
if m == c.method {
|
||||
require.Equal(t, c.count, srv.callCount(m))
|
||||
} else {
|
||||
require.Equal(t, 0, srv.callCount(m))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
beacon-chain/execution/payload_body.go (normal file, 250 lines added)
@@ -0,0 +1,250 @@
package execution
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var errNilPayloadBody = errors.New("nil payload body for block")
|
||||
|
||||
type blockWithHeader struct {
|
||||
block interfaces.ReadOnlySignedBeaconBlock
|
||||
header interfaces.ExecutionData
|
||||
}
|
||||
|
||||
// reconstructionBatch is a map of block hashes to block numbers.
|
||||
type reconstructionBatch map[[32]byte]uint64
|
||||
|
||||
type blindedBlockReconstructor struct {
|
||||
orderedBlocks []*blockWithHeader
|
||||
bodies map[[32]byte]*pb.ExecutionPayloadBody
|
||||
batches map[string]reconstructionBatch
|
||||
}
|
||||
|
||||
func reconstructBlindedBlockBatch(ctx context.Context, client RPCClient, sbb []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) {
|
||||
r, err := newBlindedBlockReconstructor(sbb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := r.requestBodies(ctx, client); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.unblinded()
|
||||
}
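
A hedged usage sketch (same package; the wrapper name is invented, not part of the diff) showing how a caller drives this entry point with an already-dialed engine RPC client:

// exampleReconstruct is a hypothetical wrapper, not part of the diff.
func exampleReconstruct(ctx context.Context, client RPCClient, blinded []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) {
	// Internally the reconstructor batches blocks per engine method version,
	// queries engine_getPayloadBodiesByHash, falls back to the ByRange variant
	// for any nil bodies, and rebuilds each signed block from its payload body.
	return reconstructBlindedBlockBatch(ctx, client, blinded)
}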
|
||||
|
||||
func newBlindedBlockReconstructor(sbb []interfaces.ReadOnlySignedBeaconBlock) (*blindedBlockReconstructor, error) {
|
||||
r := &blindedBlockReconstructor{
|
||||
orderedBlocks: make([]*blockWithHeader, 0, len(sbb)),
|
||||
bodies: make(map[[32]byte]*pb.ExecutionPayloadBody),
|
||||
}
|
||||
for i := range sbb {
|
||||
if err := r.addToBatch(sbb[i]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (r *blindedBlockReconstructor) addToBatch(b interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||
return errors.Wrap(err, "cannot reconstruct bellatrix block from nil data")
|
||||
}
|
||||
if !b.Block().IsBlinded() {
|
||||
return errors.New("can only reconstruct block from blinded block format")
|
||||
}
|
||||
header, err := b.Block().Body().Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if header.IsNil() {
|
||||
return errors.New("execution payload header in blinded block was nil")
|
||||
}
|
||||
r.orderedBlocks = append(r.orderedBlocks, &blockWithHeader{block: b, header: header})
|
||||
blockHash := bytesutil.ToBytes32(header.BlockHash())
|
||||
if blockHash == params.BeaconConfig().ZeroHash {
|
||||
return nil
|
||||
}
|
||||
|
||||
method := payloadBodyMethodForBlock(b)
|
||||
if r.batches == nil {
|
||||
r.batches = make(map[string]reconstructionBatch)
|
||||
}
|
||||
if _, ok := r.batches[method]; !ok {
|
||||
r.batches[method] = make(reconstructionBatch)
|
||||
}
|
||||
r.batches[method][bytesutil.ToBytes32(header.BlockHash())] = header.BlockNumber()
|
||||
return nil
|
||||
}
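
One detail worth calling out: a pre-merge blinded block, whose payload header carries the zero block hash, is kept for ordering but never batched for an RPC request. A small sketch under that assumption (helper name invented):

// exampleZeroHashSkipsBatching is a hypothetical illustration, not part of the diff.
func exampleZeroHashSkipsBatching(r *blindedBlockReconstructor, preMerge interfaces.ReadOnlySignedBeaconBlock) error {
	// addToBatch records the block in orderedBlocks and returns before touching
	// r.batches when the header's block hash is the zero hash, so no engine call
	// is made; payloadForHeader later builds an empty payload for it instead.
	return r.addToBatch(preMerge)
}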
|
||||
|
||||
func payloadBodyMethodForBlock(b interface{ Version() int }) string {
|
||||
if b.Version() > version.Deneb {
|
||||
return GetPayloadBodiesByHashV2
|
||||
}
|
||||
return GetPayloadBodiesByHashV1
|
||||
}
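
The version cut-off above pairs with rangeMethodForHashMethod further down: blocks through Deneb use the V1 engine methods, Electra and later use V2. A minimal sketch (helper name invented, same package):

// methodsForBlock is a hypothetical helper, not part of the diff.
func methodsForBlock(b interface{ Version() int }) (hashMethod, rangeMethod string) {
	hashMethod = payloadBodyMethodForBlock(b)          // V1 through Deneb, V2 from Electra on
	rangeMethod = rangeMethodForHashMethod(hashMethod) // the ByRange version always matches the ByHash version
	return hashMethod, rangeMethod
}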
|
||||
|
||||
func (r *blindedBlockReconstructor) requestBodies(ctx context.Context, client RPCClient) error {
|
||||
for method := range r.batches {
|
||||
nilResults, err := r.requestBodiesByHash(ctx, client, method)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := r.handleNilResults(ctx, client, method, nilResults); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type hashBlockNumber struct {
|
||||
h [32]byte
|
||||
n uint64
|
||||
}
|
||||
|
||||
func (r *blindedBlockReconstructor) handleNilResults(ctx context.Context, client RPCClient, method string, nilResults [][32]byte) error {
|
||||
if len(nilResults) == 0 {
|
||||
return nil
|
||||
}
|
||||
hbns := make([]hashBlockNumber, len(nilResults))
|
||||
for i := range nilResults {
|
||||
h := nilResults[i]
|
||||
hbns[i] = hashBlockNumber{h: h, n: r.batches[method][h]}
|
||||
}
|
||||
reqs := computeRanges(hbns)
|
||||
for i := range reqs {
|
||||
if err := r.requestBodiesByRange(ctx, client, rangeMethodForHashMethod(method), reqs[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type byRangeReq struct {
|
||||
start uint64
|
||||
count uint64
|
||||
hbns []hashBlockNumber
|
||||
}
|
||||
|
||||
func computeRanges(hbns []hashBlockNumber) []byRangeReq {
|
||||
if len(hbns) == 0 {
|
||||
return nil
|
||||
}
|
||||
sort.Slice(hbns, func(i, j int) bool {
|
||||
return hbns[i].n < hbns[j].n
|
||||
})
|
||||
ranges := make([]byRangeReq, 0)
|
||||
start := hbns[0].n
|
||||
count := uint64(0)
|
||||
for i := 0; i < len(hbns); i++ {
|
||||
if hbns[i].n == start+count {
|
||||
count++
|
||||
continue
|
||||
}
|
||||
ranges = append(ranges, byRangeReq{start: start, count: count, hbns: hbns[uint64(i)-count : i]})
|
||||
start = hbns[i].n
|
||||
count = 1
|
||||
}
|
||||
ranges = append(ranges, byRangeReq{start: start, count: count, hbns: hbns[uint64(len(hbns))-count:]})
|
||||
return ranges
|
||||
}
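
A worked example of the grouping (same package; the wrapper is invented for illustration): block numbers 1, 2, 3 and 5 collapse into two by-range requests.

// exampleRanges is a hypothetical illustration, not part of the diff.
func exampleRanges() []byRangeReq {
	// Yields {start: 1, count: 3} and {start: 5, count: 1}.
	return computeRanges([]hashBlockNumber{
		{h: [32]byte{1}, n: 1},
		{h: [32]byte{2}, n: 2},
		{h: [32]byte{3}, n: 3},
		{h: [32]byte{5}, n: 5},
	})
}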
|
||||
|
||||
func (r *blindedBlockReconstructor) requestBodiesByRange(ctx context.Context, client RPCClient, method string, req byRangeReq) error {
|
||||
result := make([]*pb.ExecutionPayloadBody, 0)
|
||||
if err := client.CallContext(ctx, &result, method, req.start, req.count); err != nil {
|
||||
return err
|
||||
}
|
||||
if uint64(len(result)) != req.count {
|
||||
return errors.Wrapf(errInvalidPayloadBodyResponse, "received %d payload bodies from %s with count=%d (start=%d)", len(result), method, req.count, req.start)
|
||||
}
|
||||
for i := range result {
|
||||
if result[i] == nil {
|
||||
return errors.Wrapf(errNilPayloadBody, "from %s, hash=%#x", method, req.hbns[i].h)
|
||||
}
|
||||
r.bodies[req.hbns[i].h] = result[i]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *blindedBlockReconstructor) requestBodiesByHash(ctx context.Context, client RPCClient, method string) ([][32]byte, error) {
|
||||
batch := r.batches[method]
|
||||
if len(batch) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
hashes := make([]common.Hash, 0, len(batch))
|
||||
for h := range batch {
|
||||
if h == params.BeaconConfig().ZeroHash {
|
||||
continue
|
||||
}
|
||||
hashes = append(hashes, h)
|
||||
}
|
||||
result := make([]*pb.ExecutionPayloadBody, 0)
|
||||
if err := client.CallContext(ctx, &result, method, hashes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(hashes) != len(result) {
|
||||
return nil, errors.Wrapf(errInvalidPayloadBodyResponse, "received %d payload bodies for %d requested hashes", len(result), len(hashes))
|
||||
}
|
||||
nilBodies := make([][32]byte, 0)
|
||||
for i := range result {
|
||||
if result[i] == nil {
|
||||
nilBodies = append(nilBodies, hashes[i])
|
||||
continue
|
||||
}
|
||||
r.bodies[hashes[i]] = result[i]
|
||||
}
|
||||
return nilBodies, nil
|
||||
}
|
||||
|
||||
func (r *blindedBlockReconstructor) payloadForHeader(header interfaces.ExecutionData, v int) (proto.Message, error) {
|
||||
bodyKey := bytesutil.ToBytes32(header.BlockHash())
|
||||
if bodyKey == params.BeaconConfig().ZeroHash {
|
||||
payload, err := buildEmptyExecutionPayload(v)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to reconstruct payload for body hash %#x", bodyKey)
|
||||
}
|
||||
return payload, nil
|
||||
}
|
||||
body, ok := r.bodies[bodyKey]
|
||||
if !ok {
|
||||
return nil, errors.Wrapf(errNilPayloadBody, "hash %#x", bodyKey)
|
||||
}
|
||||
ed, err := fullPayloadFromPayloadBody(header, body, v)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to reconstruct payload for body hash %#x", bodyKey)
|
||||
}
|
||||
return ed.Proto(), nil
|
||||
}
|
||||
|
||||
func (r *blindedBlockReconstructor) unblinded() ([]interfaces.SignedBeaconBlock, error) {
|
||||
unblinded := make([]interfaces.SignedBeaconBlock, len(r.orderedBlocks))
|
||||
for i := range r.orderedBlocks {
|
||||
blk, header := r.orderedBlocks[i].block, r.orderedBlocks[i].header
|
||||
payload, err := r.payloadForHeader(header, blk.Version())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
full, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(blk, payload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to build full block from execution payload for block hash %#x", header.BlockHash())
|
||||
}
|
||||
unblinded[i] = full
|
||||
}
|
||||
return unblinded, nil
|
||||
}
|
||||
|
||||
func rangeMethodForHashMethod(method string) string {
|
||||
if method == GetPayloadBodiesByHashV2 {
|
||||
return GetPayloadBodiesByRangeV2
|
||||
}
|
||||
return GetPayloadBodiesByRangeV1
|
||||
}
|
||||
beacon-chain/execution/payload_body_test.go (normal file, 408 lines added)
@@ -0,0 +1,408 @@
package execution
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
type versioner struct {
|
||||
version int
|
||||
}
|
||||
|
||||
func (v versioner) Version() int {
|
||||
return v.version
|
||||
}
|
||||
|
||||
func TestPayloadBodyMethodForBlock(t *testing.T) {
|
||||
cases := []struct {
|
||||
versions []int
|
||||
want string
|
||||
}{
|
||||
{
|
||||
versions: []int{version.Phase0, version.Altair, version.Bellatrix, version.Capella, version.Deneb},
|
||||
want: GetPayloadBodiesByHashV1,
|
||||
},
|
||||
{
|
||||
versions: []int{version.Electra},
|
||||
want: GetPayloadBodiesByHashV2,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
for _, v := range c.versions {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
v := versioner{version: v}
|
||||
require.Equal(t, c.want, payloadBodyMethodForBlock(v))
|
||||
})
|
||||
}
|
||||
}
|
||||
t.Run("post-electra", func(t *testing.T) {
|
||||
require.Equal(t, GetPayloadBodiesByHashV2, payloadBodyMethodForBlock(versioner{version: version.Electra + 1}))
|
||||
})
|
||||
}
|
||||
|
||||
func payloadToBody(t *testing.T, ed interfaces.ExecutionData) *pb.ExecutionPayloadBody {
|
||||
body := &pb.ExecutionPayloadBody{}
|
||||
txs, err := ed.Transactions()
|
||||
require.NoError(t, err)
|
||||
wd, err := ed.Withdrawals()
|
||||
// Bellatrix does not have withdrawals and will return an error.
|
||||
if err == nil {
|
||||
body.Withdrawals = wd
|
||||
}
|
||||
for i := range txs {
|
||||
body.Transactions = append(body.Transactions, txs[i])
|
||||
}
|
||||
eed, isElectra := ed.(interfaces.ExecutionDataElectra)
|
||||
if isElectra {
|
||||
body.DepositRequests = pb.ProtoDepositRequestsToJson(eed.DepositReceipts())
|
||||
body.WithdrawalRequests = pb.ProtoWithdrawalRequestsToJson(eed.WithdrawalRequests())
|
||||
}
|
||||
return body
|
||||
}
|
||||
|
||||
type blindedBlockFixtures struct {
|
||||
denebBlock *fullAndBlinded
|
||||
emptyDenebBlock *fullAndBlinded
|
||||
afterSkipDeneb *fullAndBlinded
|
||||
electra *fullAndBlinded
|
||||
}
|
||||
|
||||
type fullAndBlinded struct {
|
||||
full interfaces.ReadOnlySignedBeaconBlock
|
||||
blinded *blockWithHeader
|
||||
}
|
||||
|
||||
func blindedBlockWithHeader(t *testing.T, b interfaces.ReadOnlySignedBeaconBlock) *fullAndBlinded {
|
||||
header, err := b.Block().Body().Execution()
|
||||
require.NoError(t, err)
|
||||
blinded, err := b.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
return &fullAndBlinded{
|
||||
full: b,
|
||||
blinded: &blockWithHeader{
|
||||
block: blinded,
|
||||
header: header,
|
||||
}}
|
||||
}
|
||||
|
||||
func denebSlot(t *testing.T) primitives.Slot {
|
||||
s, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}
|
||||
|
||||
func electraSlot(t *testing.T) primitives.Slot {
|
||||
s, err := slots.EpochStart(params.BeaconConfig().ElectraForkEpoch)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}
|
||||
|
||||
func testBlindedBlockFixtures(t *testing.T) *blindedBlockFixtures {
|
||||
pfx := fixturesStruct()
|
||||
fx := &blindedBlockFixtures{}
|
||||
full := pfx.ExecutionPayloadDeneb
|
||||
// this func overrides fixture blockhashes to ensure they are unique
|
||||
full.BlockHash = bytesutil.PadTo([]byte("full"), 32)
|
||||
denebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, denebSlot(t), 0, util.WithPayloadSetter(full))
|
||||
fx.denebBlock = blindedBlockWithHeader(t, denebBlock)
|
||||
|
||||
empty := pfx.EmptyExecutionPayloadDeneb
|
||||
empty.BlockHash = bytesutil.PadTo([]byte("empty"), 32)
|
||||
empty.BlockNumber = 2
|
||||
emptyDenebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, denebSlot(t)+1, 0, util.WithPayloadSetter(empty))
|
||||
fx.emptyDenebBlock = blindedBlockWithHeader(t, emptyDenebBlock)
|
||||
|
||||
afterSkip := fixturesStruct().ExecutionPayloadDeneb
|
||||
// this func overrides fixture blockhashes to ensure they are unique
|
||||
afterSkip.BlockHash = bytesutil.PadTo([]byte("afterSkip"), 32)
|
||||
afterSkip.BlockNumber = 4
|
||||
afterSkipBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, denebSlot(t)+3, 0, util.WithPayloadSetter(afterSkip))
|
||||
fx.afterSkipDeneb = blindedBlockWithHeader(t, afterSkipBlock)
|
||||
|
||||
electra := fixturesStruct().ExecutionPayloadElectra
|
||||
electra.BlockHash = bytesutil.PadTo([]byte("electra"), 32)
|
||||
electra.BlockNumber = 5
|
||||
electraBlock, _ := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, electraSlot(t), 0, util.WithElectraPayload(electra))
|
||||
fx.electra = blindedBlockWithHeader(t, electraBlock)
|
||||
return fx
|
||||
}
|
||||
|
||||
func TestPayloadBodiesViaUnblinder(t *testing.T) {
|
||||
defer util.HackElectraMaxuint(t)()
|
||||
fx := testBlindedBlockFixtures(t)
|
||||
t.Run("mix of non-empty and empty", func(t *testing.T) {
|
||||
cli, srv := newMockEngine(t)
|
||||
srv.register(GetPayloadBodiesByHashV1, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{
|
||||
payloadToBody(t, fx.denebBlock.blinded.header),
|
||||
payloadToBody(t, fx.emptyDenebBlock.blinded.header),
|
||||
}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
ctx := context.Background()
|
||||
|
||||
toUnblind := []interfaces.ReadOnlySignedBeaconBlock{
|
||||
fx.denebBlock.blinded.block,
|
||||
fx.emptyDenebBlock.blinded.block,
|
||||
}
|
||||
bbr, err := newBlindedBlockReconstructor(toUnblind)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, bbr.requestBodies(ctx, cli))
|
||||
|
||||
payload, err := bbr.payloadForHeader(fx.denebBlock.blinded.header, fx.denebBlock.blinded.block.Version())
|
||||
require.NoError(t, err)
|
||||
unblindFull, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(fx.denebBlock.blinded.block, payload)
|
||||
require.NoError(t, err)
|
||||
testAssertReconstructedEquivalent(t, fx.denebBlock.full, unblindFull)
|
||||
|
||||
emptyPayload, err := bbr.payloadForHeader(fx.emptyDenebBlock.blinded.header, fx.emptyDenebBlock.blinded.block.Version())
|
||||
require.NoError(t, err)
|
||||
unblindEmpty, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(fx.emptyDenebBlock.blinded.block, emptyPayload)
|
||||
require.NoError(t, err)
|
||||
testAssertReconstructedEquivalent(t, fx.emptyDenebBlock.full, unblindEmpty)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFixtureEquivalence(t *testing.T) {
|
||||
defer util.HackElectraMaxuint(t)()
|
||||
fx := testBlindedBlockFixtures(t)
|
||||
t.Run("full and blinded block equivalence", func(t *testing.T) {
|
||||
testAssertReconstructedEquivalent(t, fx.denebBlock.blinded.block, fx.denebBlock.full)
|
||||
testAssertReconstructedEquivalent(t, fx.emptyDenebBlock.blinded.block, fx.emptyDenebBlock.full)
|
||||
})
|
||||
}
|
||||
|
||||
func testAssertReconstructedEquivalent(t *testing.T, b, ogb interfaces.ReadOnlySignedBeaconBlock) {
|
||||
bHtr, err := b.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
ogbHtr, err := ogb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, bHtr, ogbHtr)
|
||||
}
|
||||
|
||||
func TestComputeRanges(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
hbns []hashBlockNumber
|
||||
want []byRangeReq
|
||||
}{
|
||||
{
|
||||
name: "3 contiguous, 1 not",
|
||||
hbns: []hashBlockNumber{
|
||||
{h: [32]byte{5}, n: 5},
|
||||
{h: [32]byte{3}, n: 3},
|
||||
{h: [32]byte{2}, n: 2},
|
||||
{h: [32]byte{1}, n: 1},
|
||||
},
|
||||
want: []byRangeReq{
|
||||
{start: 1, count: 3, hbns: []hashBlockNumber{{h: [32]byte{1}, n: 1}, {h: [32]byte{2}, n: 2}, {h: [32]byte{3}, n: 3}}},
|
||||
{start: 5, count: 1, hbns: []hashBlockNumber{{h: [32]byte{5}, n: 5}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "1 element",
|
||||
hbns: []hashBlockNumber{
|
||||
{h: [32]byte{1}, n: 1},
|
||||
},
|
||||
want: []byRangeReq{
|
||||
{start: 1, count: 1, hbns: []hashBlockNumber{{h: [32]byte{1}, n: 1}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "2 contiguous",
|
||||
hbns: []hashBlockNumber{
|
||||
{h: [32]byte{2}, n: 2},
|
||||
{h: [32]byte{1}, n: 1},
|
||||
},
|
||||
want: []byRangeReq{
|
||||
{start: 1, count: 2, hbns: []hashBlockNumber{{h: [32]byte{1}, n: 1}, {h: [32]byte{2}, n: 2}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "2 non-contiguous",
|
||||
hbns: []hashBlockNumber{
|
||||
{h: [32]byte{3}, n: 3},
|
||||
{h: [32]byte{1}, n: 1},
|
||||
},
|
||||
want: []byRangeReq{
|
||||
{start: 1, count: 1, hbns: []hashBlockNumber{{h: [32]byte{1}, n: 1}}},
|
||||
{start: 3, count: 1, hbns: []hashBlockNumber{{h: [32]byte{3}, n: 3}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "3 contiguous",
|
||||
hbns: []hashBlockNumber{
|
||||
{h: [32]byte{2}, n: 2},
|
||||
{h: [32]byte{1}, n: 1},
|
||||
{h: [32]byte{3}, n: 3},
|
||||
},
|
||||
want: []byRangeReq{
|
||||
{start: 1, count: 3, hbns: []hashBlockNumber{{h: [32]byte{1}, n: 1}, {h: [32]byte{2}, n: 2}, {h: [32]byte{3}, n: 3}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "3 non-contiguous",
|
||||
hbns: []hashBlockNumber{
|
||||
{h: [32]byte{5}, n: 5},
|
||||
{h: [32]byte{3}, n: 3},
|
||||
{h: [32]byte{1}, n: 1},
|
||||
},
|
||||
want: []byRangeReq{
|
||||
{start: 1, count: 1, hbns: []hashBlockNumber{{h: [32]byte{1}, n: 1}}},
|
||||
{start: 3, count: 1, hbns: []hashBlockNumber{{h: [32]byte{3}, n: 3}}},
|
||||
{start: 5, count: 1, hbns: []hashBlockNumber{{h: [32]byte{5}, n: 5}}},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
got := computeRanges(c.hbns)
|
||||
for i := range got {
|
||||
require.Equal(t, c.want[i].start, got[i].start)
|
||||
require.Equal(t, c.want[i].count, got[i].count)
|
||||
require.DeepEqual(t, c.want[i].hbns, got[i].hbns)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReconstructBlindedBlockBatchFallbackToRange(t *testing.T) {
|
||||
defer util.HackElectraMaxuint(t)()
|
||||
ctx := context.Background()
|
||||
t.Run("fallback fails", func(t *testing.T) {
|
||||
cli, srv := newMockEngine(t)
|
||||
fx := testBlindedBlockFixtures(t)
|
||||
srv.register(GetPayloadBodiesByHashV1, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{nil, nil}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
srv.register(GetPayloadBodiesByRangeV1, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{nil, nil}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
toUnblind := []interfaces.ReadOnlySignedBeaconBlock{
|
||||
fx.denebBlock.blinded.block,
|
||||
fx.emptyDenebBlock.blinded.block,
|
||||
}
|
||||
_, err := reconstructBlindedBlockBatch(ctx, cli, toUnblind)
|
||||
require.ErrorIs(t, err, errNilPayloadBody)
|
||||
require.Equal(t, 1, srv.callCount(GetPayloadBodiesByHashV1))
|
||||
require.Equal(t, 1, srv.callCount(GetPayloadBodiesByRangeV1))
|
||||
})
|
||||
t.Run("fallback succeeds", func(t *testing.T) {
|
||||
cli, srv := newMockEngine(t)
|
||||
fx := testBlindedBlockFixtures(t)
|
||||
srv.register(GetPayloadBodiesByHashV1, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{nil, nil}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
srv.register(GetPayloadBodiesByRangeV1, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{
|
||||
payloadToBody(t, fx.denebBlock.blinded.header),
|
||||
payloadToBody(t, fx.emptyDenebBlock.blinded.header),
|
||||
}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
unblind := []interfaces.ReadOnlySignedBeaconBlock{
|
||||
fx.denebBlock.blinded.block,
|
||||
fx.emptyDenebBlock.blinded.block,
|
||||
}
|
||||
_, err := reconstructBlindedBlockBatch(ctx, cli, unblind)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run("separated by block number gap", func(t *testing.T) {
|
||||
cli, srv := newMockEngine(t)
|
||||
fx := testBlindedBlockFixtures(t)
|
||||
srv.register(GetPayloadBodiesByHashV1, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{nil, nil, nil}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
srv.register(GetPayloadBodiesByRangeV1, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
p := mockParseUintList(t, msg.Params)
|
||||
require.Equal(t, 2, len(p))
|
||||
start, count := p[0], p[1]
|
||||
// Return first 2 blocks by number, which are contiguous.
|
||||
if start == fx.denebBlock.blinded.header.BlockNumber() {
|
||||
require.Equal(t, uint64(2), count)
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{
|
||||
payloadToBody(t, fx.denebBlock.blinded.header),
|
||||
payloadToBody(t, fx.emptyDenebBlock.blinded.header),
|
||||
}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
return
|
||||
}
|
||||
// Assume it's the second request
|
||||
require.Equal(t, fx.afterSkipDeneb.blinded.header.BlockNumber(), start)
|
||||
require.Equal(t, uint64(1), count)
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{
|
||||
payloadToBody(t, fx.afterSkipDeneb.blinded.header),
|
||||
}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
// separate methods for the electra block
|
||||
srv.register(GetPayloadBodiesByHashV2, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{nil}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
srv.register(GetPayloadBodiesByRangeV2, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
p := mockParseUintList(t, msg.Params)
|
||||
require.Equal(t, 2, len(p))
|
||||
start, count := p[0], p[1]
|
||||
require.Equal(t, fx.electra.blinded.header.BlockNumber(), start)
|
||||
require.Equal(t, uint64(1), count)
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{
|
||||
payloadToBody(t, fx.electra.blinded.header),
|
||||
}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
blind := []interfaces.ReadOnlySignedBeaconBlock{
|
||||
fx.denebBlock.blinded.block,
|
||||
fx.emptyDenebBlock.blinded.block,
|
||||
fx.afterSkipDeneb.blinded.block,
|
||||
}
|
||||
unblind, err := reconstructBlindedBlockBatch(ctx, cli, blind)
|
||||
require.NoError(t, err)
|
||||
for i := range unblind {
|
||||
testAssertReconstructedEquivalent(t, blind[i], unblind[i])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestReconstructBlindedBlockBatchDenebAndElectra(t *testing.T) {
|
||||
defer util.HackElectraMaxuint(t)()
|
||||
t.Run("deneb and electra", func(t *testing.T) {
|
||||
cli, srv := newMockEngine(t)
|
||||
fx := testBlindedBlockFixtures(t)
|
||||
// The reconstructor should make separate calls for the deneb (v1) and electra (v2) blocks.
|
||||
srv.register(GetPayloadBodiesByHashV1, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{payloadToBody(t, fx.denebBlock.blinded.header)}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
srv.register(GetPayloadBodiesByHashV2, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{payloadToBody(t, fx.electra.blinded.header)}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
blinded := []interfaces.ReadOnlySignedBeaconBlock{
|
||||
fx.denebBlock.blinded.block,
|
||||
fx.electra.blinded.block,
|
||||
}
|
||||
unblinded, err := reconstructBlindedBlockBatch(context.Background(), cli, blinded)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(blinded), len(unblinded))
|
||||
for i := range unblinded {
|
||||
testAssertReconstructedEquivalent(t, blinded[i], unblinded[i])
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -25,7 +25,6 @@ go_library(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//accounts/abi/bind/backends:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// EngineClient --
|
||||
@@ -23,26 +22,20 @@ type EngineClient struct {
|
||||
NewPayloadResp []byte
|
||||
PayloadIDBytes *pb.PayloadIDBytes
|
||||
ForkChoiceUpdatedResp []byte
|
||||
ExecutionPayload *pb.ExecutionPayload
|
||||
ExecutionPayloadCapella *pb.ExecutionPayloadCapella
|
||||
ExecutionPayloadDeneb *pb.ExecutionPayloadDeneb
|
||||
ExecutionPayloadElectra *pb.ExecutionPayloadElectra
|
||||
ExecutionBlock *pb.ExecutionBlock
|
||||
Err error
|
||||
ErrLatestExecBlock error
|
||||
ErrExecBlockByHash error
|
||||
ErrForkchoiceUpdated error
|
||||
ErrNewPayload error
|
||||
ErrGetPayload error
|
||||
ExecutionPayloadByBlockHash map[[32]byte]*pb.ExecutionPayload
|
||||
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
|
||||
NumReconstructedPayloads uint64
|
||||
TerminalBlockHash []byte
|
||||
TerminalBlockHashExists bool
|
||||
BuilderOverride bool
|
||||
OverrideValidHash [32]byte
|
||||
BlockValue uint64
|
||||
BlobsBundle *pb.BlobsBundle
|
||||
GetPayloadResponse *blocks.GetPayloadResponse
|
||||
ErrGetPayload error
|
||||
}
|
||||
|
||||
// NewPayload --
|
||||
@@ -61,33 +54,8 @@ func (e *EngineClient) ForkchoiceUpdated(
|
||||
}
|
||||
|
||||
// GetPayload --
|
||||
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error) {
|
||||
if slots.ToEpoch(s) >= params.BeaconConfig().ElectraForkEpoch {
|
||||
ed, err := blocks.WrappedExecutionPayloadElectra(e.ExecutionPayloadElectra, big.NewInt(int64(e.BlockValue)))
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
return ed, e.BlobsBundle, e.BuilderOverride, nil
|
||||
}
|
||||
if slots.ToEpoch(s) >= params.BeaconConfig().DenebForkEpoch {
|
||||
ed, err := blocks.WrappedExecutionPayloadDeneb(e.ExecutionPayloadDeneb, big.NewInt(int64(e.BlockValue)))
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
return ed, e.BlobsBundle, e.BuilderOverride, nil
|
||||
}
|
||||
if slots.ToEpoch(s) >= params.BeaconConfig().CapellaForkEpoch {
|
||||
ed, err := blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, big.NewInt(int64(e.BlockValue)))
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
return ed, nil, e.BuilderOverride, nil
|
||||
}
|
||||
p, err := blocks.WrappedExecutionPayload(e.ExecutionPayload)
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
return p, nil, e.BuilderOverride, e.ErrGetPayload
|
||||
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (*blocks.GetPayloadResponse, error) {
|
||||
return e.GetPayloadResponse, e.ErrGetPayload
|
||||
}
|
||||
|
||||
// LatestExecutionBlock --
|
||||
|
||||
@@ -33,7 +33,7 @@ func (s *Service) canUpdateAttestedValidator(idx primitives.ValidatorIndex, slot
|
||||
}
|
||||
|
||||
// attestingIndices returns the indices of validators that participated in the given aggregated attestation.
|
||||
func attestingIndices(ctx context.Context, state state.BeaconState, att interfaces.Attestation) ([]uint64, error) {
|
||||
func attestingIndices(ctx context.Context, state state.BeaconState, att ethpb.Att) ([]uint64, error) {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -63,7 +63,7 @@ func (s *Service) processAttestations(ctx context.Context, state state.BeaconSta
|
||||
}
|
||||
|
||||
// processIncludedAttestation logs an event when a tracked validator's latest attestation gets processed.
|
||||
func (s *Service) processIncludedAttestation(ctx context.Context, state state.BeaconState, att interfaces.Attestation) {
|
||||
func (s *Service) processIncludedAttestation(ctx context.Context, state state.BeaconState, att ethpb.Att) {
|
||||
attestingIndices, err := attestingIndices(ctx, state, att)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attesting indices")
|
||||
@@ -161,7 +161,7 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
|
||||
}
|
||||
|
||||
// processUnaggregatedAttestation logs when the beacon node observes an unaggregated attestation from a tracked validator.
|
||||
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att interfaces.Attestation) {
|
||||
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att ethpb.Att) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
root := bytesutil.ToBytes32(att.GetData().BeaconBlockRoot)
|
||||
|
||||
@@ -121,17 +121,20 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
|
||||
for _, slashing := range blk.Body().AttesterSlashings() {
|
||||
for _, idx := range blocks.SlashableAttesterIndices(slashing) {
|
||||
if s.trackedIndex(primitives.ValidatorIndex(idx)) {
|
||||
data1 := slashing.FirstAttestation().GetData()
|
||||
data2 := slashing.SecondAttestation().GetData()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"attesterIndex": idx,
|
||||
"blockInclusionSlot": blk.Slot(),
|
||||
"attestationSlot1": slashing.GetFirstAttestation().GetData().Slot,
|
||||
"beaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.GetFirstAttestation().GetData().BeaconBlockRoot)),
|
||||
"sourceEpoch1": slashing.GetFirstAttestation().GetData().Source.Epoch,
|
||||
"targetEpoch1": slashing.GetFirstAttestation().GetData().Target.Epoch,
|
||||
"attestationSlot2": slashing.GetSecondAttestation().GetData().Slot,
|
||||
"beaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.GetSecondAttestation().GetData().BeaconBlockRoot)),
|
||||
"sourceEpoch2": slashing.GetSecondAttestation().GetData().Source.Epoch,
|
||||
"targetEpoch2": slashing.GetSecondAttestation().GetData().Target.Epoch,
|
||||
"attestationSlot1": data1.Slot,
|
||||
"beaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(data1.BeaconBlockRoot)),
|
||||
"sourceEpoch1": data1.Source.Epoch,
|
||||
"targetEpoch1": data1.Target.Epoch,
|
||||
"attestationSlot2": data2.Slot,
|
||||
"beaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(data2.BeaconBlockRoot)),
|
||||
"sourceEpoch2": data2.Source.Epoch,
|
||||
"targetEpoch2": data2.Target.Epoch,
|
||||
}).Info("Attester slashing was included")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -992,6 +992,7 @@ func (b *BeaconNode) registerRPCService(router *mux.Router) error {
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
BlobReceiver: chainService,
|
||||
DataColumnReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
|
||||
@@ -20,9 +20,9 @@ go_library(
|
||||
"//cache/lru:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
@@ -49,7 +49,6 @@ go_test(
|
||||
"//beacon-chain/operations/attestations/kv:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -15,9 +15,9 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -39,7 +39,6 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
attaggregation "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation/aggregation/attestations"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -28,11 +28,11 @@ func (c *AttCaches) AggregateUnaggregatedAttestations(ctx context.Context) error
|
||||
return c.aggregateUnaggregatedAtts(ctx, unaggregatedAtts)
|
||||
}
|
||||
|
||||
func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedAtts []interfaces.Attestation) error {
|
||||
func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedAtts []ethpb.Att) error {
|
||||
_, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAtts")
|
||||
defer span.End()
|
||||
|
||||
attsByDataRoot := make(map[[32]byte][]interfaces.Attestation, len(unaggregatedAtts))
|
||||
attsByDataRoot := make(map[[32]byte][]ethpb.Att, len(unaggregatedAtts))
|
||||
for _, att := range unaggregatedAtts {
|
||||
attDataRoot, err := att.GetData().HashTreeRoot()
|
||||
if err != nil {
|
||||
@@ -66,12 +66,12 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
|
||||
// aggregateParallel aggregates attestations in parallel for `atts` and saves them in the pool,
// returning the attestations that could not be aggregated.
// Given `n` CPU cores, it creates a channel of size `n` and spawns `n` goroutines to aggregate attestations.
|
||||
func (c *AttCaches) aggregateParallel(atts map[[32]byte][]interfaces.Attestation, leftOver map[[32]byte]bool) map[[32]byte]bool {
|
||||
func (c *AttCaches) aggregateParallel(atts map[[32]byte][]ethpb.Att, leftOver map[[32]byte]bool) map[[32]byte]bool {
|
||||
var leftoverLock sync.Mutex
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
n := runtime.GOMAXPROCS(0) // defaults to the value of runtime.NumCPU
ch := make(chan []interfaces.Attestation, n)
ch := make(chan []ethpb.Att, n)
wg.Add(n)
for i := 0; i < n; i++ {
go func() {
@@ -87,7 +87,7 @@ func (c *AttCaches) aggregateParallel(atts map[[32]byte][]interfaces.Attestation
continue
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]interfaces.Attestation{aggregated}); err != nil {
if err := c.SaveAggregatedAttestations([]ethpb.Att{aggregated}); err != nil {
log.WithError(err).Error("could not save aggregated attestation")
continue
}
@@ -116,7 +116,7 @@ func (c *AttCaches) aggregateParallel(atts map[[32]byte][]interfaces.Attestation
}

// SaveAggregatedAttestation saves an aggregated attestation in cache.
func (c *AttCaches) SaveAggregatedAttestation(att interfaces.Attestation) error {
func (c *AttCaches) SaveAggregatedAttestation(att ethpb.Att) error {
if err := helpers.ValidateNilAttestation(att); err != nil {
return err
}
@@ -143,12 +143,12 @@ func (c *AttCaches) SaveAggregatedAttestation(att interfaces.Attestation) error
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
}
copiedAtt := interfaces.CopyAttestation(att)
copiedAtt := att.Copy()
c.aggregatedAttLock.Lock()
defer c.aggregatedAttLock.Unlock()
atts, ok := c.aggregatedAtt[r]
if !ok {
atts := []interfaces.Attestation{copiedAtt}
atts := []ethpb.Att{copiedAtt}
c.aggregatedAtt[r] = atts
return nil
}
@@ -163,7 +163,7 @@ func (c *AttCaches) SaveAggregatedAttestation(att interfaces.Attestation) error
}

// SaveAggregatedAttestations saves a list of aggregated attestations in cache.
func (c *AttCaches) SaveAggregatedAttestations(atts []interfaces.Attestation) error {
func (c *AttCaches) SaveAggregatedAttestations(atts []ethpb.Att) error {
for _, att := range atts {
if err := c.SaveAggregatedAttestation(att); err != nil {
log.WithError(err).Debug("Could not save aggregated attestation")
@@ -176,11 +176,11 @@ func (c *AttCaches) SaveAggregatedAttestations(atts []interfaces.Attestation) er
}

// AggregatedAttestations returns the aggregated attestations in cache.
func (c *AttCaches) AggregatedAttestations() []interfaces.Attestation {
func (c *AttCaches) AggregatedAttestations() []ethpb.Att {
c.aggregatedAttLock.RLock()
defer c.aggregatedAttLock.RUnlock()

atts := make([]interfaces.Attestation, 0)
atts := make([]ethpb.Att, 0)

for _, a := range c.aggregatedAtt {
atts = append(atts, a...)
@@ -191,11 +191,11 @@ func (c *AttCaches) AggregatedAttestations() []interfaces.Attestation {

// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []interfaces.Attestation {
func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []ethpb.Att {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
defer span.End()

atts := make([]interfaces.Attestation, 0)
atts := make([]ethpb.Att, 0)

c.aggregatedAttLock.RLock()
defer c.aggregatedAttLock.RUnlock()
@@ -209,7 +209,7 @@ func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot
}

// DeleteAggregatedAttestation deletes the aggregated attestations in cache.
func (c *AttCaches) DeleteAggregatedAttestation(att interfaces.Attestation) error {
func (c *AttCaches) DeleteAggregatedAttestation(att ethpb.Att) error {
if err := helpers.ValidateNilAttestation(att); err != nil {
return err
}
@@ -232,7 +232,7 @@ func (c *AttCaches) DeleteAggregatedAttestation(att interfaces.Attestation) erro
return nil
}

filtered := make([]interfaces.Attestation, 0)
filtered := make([]ethpb.Att, 0)
for _, a := range attList {
if c, err := att.GetAggregationBits().Contains(a.GetAggregationBits()); err != nil {
return err
@@ -250,7 +250,7 @@ func (c *AttCaches) DeleteAggregatedAttestation(att interfaces.Attestation) erro
}

// HasAggregatedAttestation checks if the input attestations has already existed in cache.
func (c *AttCaches) HasAggregatedAttestation(att interfaces.Attestation) (bool, error) {
func (c *AttCaches) HasAggregatedAttestation(att ethpb.Att) (bool, error) {
if err := helpers.ValidateNilAttestation(att); err != nil {
return false, err
}
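
The aggregated.go hunks above all swap the concrete interfaces.Attestation type for the ethpb.Att interface; the first few lines also show the shape of aggregateParallel, which fans attestation groups out to runtime.GOMAXPROCS(0) workers over a buffered channel and collects the results. Below is a minimal, self-contained sketch of that fan-out/fan-in shape only; the att type and the aggregation-by-OR logic are simplified assumptions for illustration, not Prysm's aggregation code.

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// att is a simplified stand-in for ethpb.Att: only the aggregation bits matter here.
type att struct {
	bits uint64
}

// aggregateGroup ORs every bitfield in a group into a single attestation.
func aggregateGroup(group []att) att {
	var out att
	for _, a := range group {
		out.bits |= a.bits
	}
	return out
}

// aggregateParallel mirrors the fan-out/fan-in shape from the diff above:
// one worker per logical CPU, all draining groups from a buffered channel.
func aggregateParallel(groups [][]att) []att {
	n := runtime.GOMAXPROCS(0) // defaults to the value of runtime.NumCPU
	ch := make(chan []att, n)

	var (
		mu         sync.Mutex
		aggregated = make([]att, 0, len(groups))
		wg         sync.WaitGroup
	)

	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			defer wg.Done()
			for group := range ch {
				agg := aggregateGroup(group)
				mu.Lock()
				aggregated = append(aggregated, agg)
				mu.Unlock()
			}
		}()
	}

	for _, g := range groups {
		ch <- g
	}
	close(ch)
	wg.Wait()

	return aggregated
}

func main() {
	groups := [][]att{
		{{bits: 0b1010}, {bits: 0b0101}},
		{{bits: 0b1100}, {bits: 0b0011}},
	}
	for _, a := range aggregateParallel(groups) {
		fmt.Printf("%04b\n", a.bits)
	}
}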
@@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
fssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
@@ -31,7 +30,7 @@ func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
att6 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1010}, Signature: sig1.Marshal()})
att7 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1100}, Signature: sig1.Marshal()})
att8 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig2.Marshal()})
atts := []interfaces.Attestation{att1, att2, att3, att4, att5, att6, att7, att8}
atts := []ethpb.Att{att1, att2, att3, att4, att5, att6, att7, att8}
require.NoError(t, cache.SaveUnaggregatedAttestations(atts))
require.NoError(t, cache.AggregateUnaggregatedAttestations(context.Background()))

@@ -42,7 +41,7 @@ func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
tests := []struct {
name string
att interfaces.Attestation
att ethpb.Att
count int
wantErrString string
}{
@@ -119,13 +118,13 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
func TestKV_Aggregated_SaveAggregatedAttestations(t *testing.T) {
tests := []struct {
name string
atts []interfaces.Attestation
atts []ethpb.Att
count int
wantErrString string
}{
{
name: "no duplicates",
atts: []interfaces.Attestation{
atts: []ethpb.Att{
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1},
AggregationBits: bitfield.Bitlist{0b1101}}),
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1},
@@ -154,13 +153,13 @@ func TestKV_Aggregated_SaveAggregatedAttestations(t *testing.T) {
func TestKV_Aggregated_SaveAggregatedAttestations_SomeGoodSomeBad(t *testing.T) {
tests := []struct {
name string
atts []interfaces.Attestation
atts []ethpb.Att
count int
wantErrString string
}{
{
name: "the first attestation is bad",
atts: []interfaces.Attestation{
atts: []ethpb.Att{
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1},
AggregationBits: bitfield.Bitlist{0b1100}}),
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1},
@@ -192,7 +191,7 @@ func TestKV_Aggregated_AggregatedAttestations(t *testing.T) {
att1 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
att3 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
atts := []interfaces.Attestation{att1, att2, att3}
atts := []ethpb.Att{att1, att2, att3}

for _, att := range atts {
require.NoError(t, cache.SaveAggregatedAttestation(att))
@@ -247,13 +246,13 @@ func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b11010}})
att3 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b11010}})
att4 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b10101}})
atts := []interfaces.Attestation{att1, att2, att3, att4}
atts := []ethpb.Att{att1, att2, att3, att4}
require.NoError(t, cache.SaveAggregatedAttestations(atts))
require.NoError(t, cache.DeleteAggregatedAttestation(att1))
require.NoError(t, cache.DeleteAggregatedAttestation(att3))

returned := cache.AggregatedAttestations()
wanted := []interfaces.Attestation{att2}
wanted := []ethpb.Att{att2}
assert.DeepEqual(t, wanted, returned)
})

@@ -263,14 +262,14 @@ func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110111}})
att3 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110100}})
att4 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110101}})
atts := []interfaces.Attestation{att1, att2, att3, att4}
atts := []ethpb.Att{att1, att2, att3, att4}
require.NoError(t, cache.SaveAggregatedAttestations(atts))

assert.Equal(t, 2, cache.AggregatedAttestationCount(), "Unexpected number of atts")
require.NoError(t, cache.DeleteAggregatedAttestation(att4))

returned := cache.AggregatedAttestations()
wanted := []interfaces.Attestation{att1, att2}
wanted := []ethpb.Att{att1, att2}
sort.Slice(returned, func(i, j int) bool {
return string(returned[i].GetAggregationBits()) < string(returned[j].GetAggregationBits())
})
@@ -281,7 +280,7 @@ func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
tests := []struct {
name string
existing []interfaces.Attestation
existing []ethpb.Att
input *ethpb.Attestation
want bool
err error
@@ -320,7 +319,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
},
{
name: "single attestation in cache with exact match",
existing: []interfaces.Attestation{&ethpb.Attestation{
existing: []ethpb.Att{&ethpb.Attestation{
Data: util.HydrateAttestationData(&ethpb.AttestationData{
Slot: 1,
}),
@@ -335,7 +334,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
},
{
name: "single attestation in cache with subset aggregation",
existing: []interfaces.Attestation{&ethpb.Attestation{
existing: []ethpb.Att{&ethpb.Attestation{
Data: util.HydrateAttestationData(&ethpb.AttestationData{
Slot: 1,
}),
@@ -350,7 +349,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
},
{
name: "single attestation in cache with superset aggregation",
existing: []interfaces.Attestation{&ethpb.Attestation{
existing: []ethpb.Att{&ethpb.Attestation{
Data: util.HydrateAttestationData(&ethpb.AttestationData{
Slot: 1,
}),
@@ -365,7 +364,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
},
{
name: "multiple attestations with same data in cache with overlapping aggregation, input is subset",
existing: []interfaces.Attestation{
existing: []ethpb.Att{
&ethpb.Attestation{
Data: util.HydrateAttestationData(&ethpb.AttestationData{
Slot: 1,
@@ -388,7 +387,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
},
{
name: "multiple attestations with same data in cache with overlapping aggregation and input is superset",
existing: []interfaces.Attestation{
existing: []ethpb.Att{
&ethpb.Attestation{
Data: util.HydrateAttestationData(&ethpb.AttestationData{
Slot: 1,
@@ -411,7 +410,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
},
{
name: "multiple attestations with different data in cache",
existing: []interfaces.Attestation{
existing: []ethpb.Att{
&ethpb.Attestation{
Data: util.HydrateAttestationData(&ethpb.AttestationData{
Slot: 2,
@@ -434,7 +433,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
},
{
name: "attestations with different bitlist lengths",
existing: []interfaces.Attestation{
existing: []ethpb.Att{
&ethpb.Attestation{
Data: util.HydrateAttestationData(&ethpb.AttestationData{
Slot: 2,
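
The HasAggregatedAttestation cases above (exact match, subset aggregation, superset aggregation) reduce to containment checks on the aggregation bitlists. Here is a short, runnable illustration using the same go-bitfield package the tests import; the bit patterns are arbitrary values chosen for this sketch, not taken from the tests.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	// In a Bitlist the highest set bit marks the length, so 0b1101 is a
	// 3-bit list with participants at indices 0 and 2, and 0b1001 is a
	// 3-bit list with a single participant at index 0.
	existing := bitfield.Bitlist{0b1101}
	incoming := bitfield.Bitlist{0b1001}

	// Contains reports whether existing covers every bit set in incoming,
	// i.e. whether incoming is a subset of existing.
	subset, err := existing.Contains(incoming)
	if err != nil {
		fmt.Println("bitlists have different lengths:", err)
		return
	}
	fmt.Println("incoming is a subset of existing:", subset) // true

	// The reverse check answers the superset question from the test names.
	superset, err := incoming.Contains(existing)
	if err != nil {
		fmt.Println("bitlists have different lengths:", err)
		return
	}
	fmt.Println("incoming is a superset of existing:", superset) // false
}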
@@ -2,11 +2,11 @@ package kv

import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// SaveBlockAttestation saves an block attestation in cache.
func (c *AttCaches) SaveBlockAttestation(att interfaces.Attestation) error {
func (c *AttCaches) SaveBlockAttestation(att ethpb.Att) error {
if att == nil {
return nil
}
@@ -19,7 +19,7 @@ func (c *AttCaches) SaveBlockAttestation(att interfaces.Attestation) error {
defer c.blockAttLock.Unlock()
atts, ok := c.blockAtt[r]
if !ok {
atts = make([]interfaces.Attestation, 0, 1)
atts = make([]ethpb.Att, 0, 1)
}

// Ensure that this attestation is not already fully contained in an existing attestation.
@@ -31,14 +31,14 @@ func (c *AttCaches) SaveBlockAttestation(att interfaces.Attestation) error {
}
}

c.blockAtt[r] = append(atts, interfaces.CopyAttestation(att))
c.blockAtt[r] = append(atts, att.Copy())

return nil
}

// BlockAttestations returns the block attestations in cache.
func (c *AttCaches) BlockAttestations() []interfaces.Attestation {
atts := make([]interfaces.Attestation, 0)
func (c *AttCaches) BlockAttestations() []ethpb.Att {
atts := make([]ethpb.Att, 0)

c.blockAttLock.RLock()
defer c.blockAttLock.RUnlock()
@@ -50,7 +50,7 @@ func (c *AttCaches) BlockAttestations() []interfaces.Attestation {
}

// DeleteBlockAttestation deletes a block attestation in cache.
func (c *AttCaches) DeleteBlockAttestation(att interfaces.Attestation) error {
func (c *AttCaches) DeleteBlockAttestation(att ethpb.Att) error {
if att == nil {
return nil
}
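
The block.go hunks keep the same structure as the rest of the kv cache: a map keyed by the attestation data root, guarded by a sync.RWMutex, with att.Copy() on insert and on read so the cache never aliases caller-owned memory. The following is a compact sketch of that shape under stated assumptions: the record type, its Copy method, and the sha256-based key are simplified stand-ins for ethpb.Att and its tree-hash-derived root, not the actual Prysm code.

package main

import (
	"crypto/sha256"
	"fmt"
	"sync"
)

// record is a simplified stand-in for ethpb.Att; only the fields needed to
// key and copy it are modeled here.
type record struct {
	data string
	bits []byte
}

// Copy mirrors the att.Copy() calls in the diff above: the cache never keeps
// a reference to memory the caller can still mutate.
func (r *record) Copy() *record {
	b := make([]byte, len(r.bits))
	copy(b, r.bits)
	return &record{data: r.data, bits: b}
}

// cache mirrors the shape of the kv cache above: a map keyed by the hash of
// the record's data, guarded by a sync.RWMutex, holding one slice per key.
type cache struct {
	mu     sync.RWMutex
	byRoot map[[32]byte][]*record
}

func (c *cache) Save(r *record) {
	root := sha256.Sum256([]byte(r.data)) // stand-in for a HashTreeRoot-style key
	c.mu.Lock()
	defer c.mu.Unlock()
	c.byRoot[root] = append(c.byRoot[root], r.Copy())
}

func (c *cache) All() []*record {
	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make([]*record, 0)
	for _, rs := range c.byRoot {
		for _, r := range rs {
			out = append(out, r.Copy()) // copy on the way out as well
		}
	}
	return out
}

func main() {
	c := &cache{byRoot: map[[32]byte][]*record{}}
	c.Save(&record{data: "slot-1", bits: []byte{0b0101}})
	c.Save(&record{data: "slot-1", bits: []byte{0b0011}})
	fmt.Println("records cached:", len(c.All())) // 2
}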
@@ -5,7 +5,6 @@ import (
"testing"

"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -18,7 +17,7 @@ func TestKV_BlockAttestation_CanSaveRetrieve(t *testing.T) {
att1 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
att3 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
atts := []interfaces.Attestation{att1, att2, att3}
atts := []ethpb.Att{att1, att2, att3}

for _, att := range atts {
require.NoError(t, cache.SaveBlockAttestation(att))
@@ -44,7 +43,7 @@ func TestKV_BlockAttestation_CanDelete(t *testing.T) {
att1 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
att3 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
atts := []interfaces.Attestation{att1, att2, att3}
atts := []ethpb.Att{att1, att2, att3}

for _, att := range atts {
require.NoError(t, cache.SaveBlockAttestation(att))
@@ -54,6 +53,6 @@ func TestKV_BlockAttestation_CanDelete(t *testing.T) {
require.NoError(t, cache.DeleteBlockAttestation(att3))

returned := cache.BlockAttestations()
wanted := []interfaces.Attestation{att2}
wanted := []ethpb.Att{att2}
assert.DeepEqual(t, wanted, returned)
}
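
The block attestation tests above follow a save, delete, compare-remaining pattern over []ethpb.Att slices. Below is a stripped-down, runnable version of that same pattern using only the standard library; the miniCache type and attID values are invented for illustration and stand in for the kv cache and real attestations.

package kv_sketch

import (
	"reflect"
	"testing"
)

// attID is a minimal stand-in for an attestation; only identity matters here.
type attID int

type miniCache struct{ atts map[attID]bool }

func (c *miniCache) Save(a attID)   { c.atts[a] = true }
func (c *miniCache) Delete(a attID) { delete(c.atts, a) }
func (c *miniCache) All() []attID {
	out := make([]attID, 0, len(c.atts))
	for a := range c.atts {
		out = append(out, a)
	}
	return out
}

// TestSaveDeleteRoundTrip mirrors the save -> delete -> compare-remaining shape
// of the block attestation tests above, with toy types.
func TestSaveDeleteRoundTrip(t *testing.T) {
	c := &miniCache{atts: map[attID]bool{}}
	for _, a := range []attID{1, 2, 3} {
		c.Save(a)
	}
	c.Delete(1)
	c.Delete(3)

	if got, want := c.All(), []attID{2}; !reflect.DeepEqual(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
}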
@@ -2,11 +2,11 @@ package kv

import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// SaveForkchoiceAttestation saves an forkchoice attestation in cache.
func (c *AttCaches) SaveForkchoiceAttestation(att interfaces.Attestation) error {
func (c *AttCaches) SaveForkchoiceAttestation(att ethpb.Att) error {
if att == nil {
return nil
}
@@ -15,16 +15,15 @@ func (c *AttCaches) SaveForkchoiceAttestation(att interfaces.Attestation) error
return errors.Wrap(err, "could not tree hash attestation")
}

att = interfaces.CopyAttestation(att)
c.forkchoiceAttLock.Lock()
defer c.forkchoiceAttLock.Unlock()
c.forkchoiceAtt[r] = att
c.forkchoiceAtt[r] = att.Copy()

return nil
}

// SaveForkchoiceAttestations saves a list of forkchoice attestations in cache.
func (c *AttCaches) SaveForkchoiceAttestations(atts []interfaces.Attestation) error {
func (c *AttCaches) SaveForkchoiceAttestations(atts []ethpb.Att) error {
for _, att := range atts {
if err := c.SaveForkchoiceAttestation(att); err != nil {
return err
@@ -35,20 +34,20 @@ func (c *AttCaches) SaveForkchoiceAttestations(atts []interfaces.Attestation) er
}

// ForkchoiceAttestations returns the forkchoice attestations in cache.
func (c *AttCaches) ForkchoiceAttestations() []interfaces.Attestation {
func (c *AttCaches) ForkchoiceAttestations() []ethpb.Att {
c.forkchoiceAttLock.RLock()
defer c.forkchoiceAttLock.RUnlock()

atts := make([]interfaces.Attestation, 0, len(c.forkchoiceAtt))
atts := make([]ethpb.Att, 0, len(c.forkchoiceAtt))
for _, att := range c.forkchoiceAtt {
atts = append(atts, interfaces.CopyAttestation(att) /* Copied */)
atts = append(atts, att.Copy())
}

return atts
}

// DeleteForkchoiceAttestation deletes a forkchoice attestation in cache.
func (c *AttCaches) DeleteForkchoiceAttestation(att interfaces.Attestation) error {
func (c *AttCaches) DeleteForkchoiceAttestation(att ethpb.Att) error {
if att == nil {
return nil
}
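
The forkchoice hunk above replaces the up-front interfaces.CopyAttestation call with a copy taken right at the point of insertion (c.forkchoiceAtt[r] = att.Copy()), and ForkchoiceAttestations likewise copies on the way out. The toy example below illustrates the aliasing hazard those copies guard against; miniAtt and its Copy method are invented stand-ins, not Prysm types.

package main

import "fmt"

// miniAtt is a simplified stand-in for ethpb.Att with a mutable bitfield.
type miniAtt struct {
	bits []byte
}

// Copy mirrors the att.Copy() call in the diff above: a deep copy so the
// cache never aliases memory still owned by the caller.
func (a *miniAtt) Copy() *miniAtt {
	out := &miniAtt{bits: make([]byte, len(a.bits))}
	copy(out.bits, a.bits)
	return out
}

type cache struct {
	byRoot map[string]*miniAtt
}

// Save stores a defensive copy, so later mutations by the caller do not leak
// into the cache (the same reason SaveForkchoiceAttestation copies on insert).
func (c *cache) Save(root string, a *miniAtt) {
	c.byRoot[root] = a.Copy()
}

func main() {
	c := &cache{byRoot: map[string]*miniAtt{}}
	a := &miniAtt{bits: []byte{0b0001}}
	c.Save("root", a)

	a.bits[0] = 0b1111 // the caller keeps mutating its own attestation

	fmt.Printf("cached bits: %04b\n", c.byRoot["root"].bits[0]) // still 0001
}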
Some files were not shown because too many files have changed in this diff.