mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-14 07:48:08 -05:00)

Compare commits: develop...geth-v1.16 (1 commit)
| Author | SHA1 | Date |
|---|---|---|
|  | 4a12d3777d |  |
@@ -72,7 +72,7 @@ Do NOT add new `go_repository` to the WORKSPACE file. All dependencies should li
 
 To enable conditional compilation and custom configuration for tests (where compiled code has more
 debug info, while not being completely optimized), we rely on Go's build tags/constraints mechanism
-(see official docs on [build constraints](https://pkg.go.dev/go/build#hdr-Build_Constraints)).
+(see official docs on [build constraints](https://golang.org/pkg/go/build/#hdr-Build_Constraints)).
 Therefore, whenever using `go test`, do not forget to pass in extra build tag, eg:
 
 ```bash
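The command inside that fence is not preserved in this mirror. As a hedged sketch of the kind of invocation the docs describe (the tag name `develop` is an assumption, not recovered from this diff):

```bash
# Hypothetical example: pass the extra build tag so test-only code paths compile.
# Substitute the project's actual test build tag if it differs.
go test -tags=develop ./beacon-chain/...
```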
@@ -9,7 +9,7 @@ This README details how to setup Prysm for interop testing for usage with other
 
 ## Installation & Setup
 
-1. Install [Bazel](https://bazel.build/install) **(Recommended)**
+1. Install [Bazel](https://docs.bazel.build/versions/master/install.html) **(Recommended)**
 2. `git clone https://github.com/OffchainLabs/prysm && cd prysm`
 3. `bazel build //cmd/...`
@@ -15,7 +15,7 @@
 
 ## 📖 Overview
 
-This is the core repository for Prysm, a [Golang](https://go.dev/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com).
+This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com).
 
 See the [Changelog](https://github.com/OffchainLabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
@@ -23,7 +23,7 @@ See the [Changelog](https://github.com/OffchainLabs/prysm/releases) for details
 
 ## 🚀 Getting Started
 
-A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the **[official documentation portal](https://prysm.offchainlabs.com/docs/)**.
+A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the **[official documentation portal](https://docs.prylabs.network)**.
 
 💬 **Need help?** Join our **[Discord Community](https://discord.gg/prysm)** for support.
@@ -51,7 +51,7 @@ Prysm maintains two permanent branches:
 
 ### 🛠 Contribution Guide
 
-Want to get involved? Check out our **[Contribution Guide](https://prysm.offchainlabs.com/docs/contribute/contribution-guidelines/)** to learn more!
+Want to get involved? Check out our **[Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/)** to learn more!
 
 ---
@@ -238,9 +238,14 @@ func recomputeRootFromLayerVariable(idx int, item [32]byte, layers [][]*[32]byte
 // AddInMixin describes a method from which a length mixin is added to the
 // provided root.
 func AddInMixin(root [32]byte, length uint64) ([32]byte, error) {
-	var rootBufRoot [32]byte
-	binary.LittleEndian.PutUint64(rootBufRoot[:], length)
-	return ssz.MixInLength(root, rootBufRoot[:]), nil
+	rootBuf := new(bytes.Buffer)
+	if err := binary.Write(rootBuf, binary.LittleEndian, length); err != nil {
+		return [32]byte{}, errors.Wrap(err, "could not marshal eth1data votes length")
+	}
+	// We need to mix in the length of the slice.
+	rootBufRoot := make([]byte, 32)
+	copy(rootBufRoot, rootBuf.Bytes())
+	return ssz.MixInLength(root, rootBufRoot), nil
 }
 
 // Merkleize 32-byte leaves into a Merkle trie for its adequate depth, returning
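The two sides of this hunk differ only in how the 8-byte little-endian length is staged before `ssz.MixInLength`. A standalone sketch (helper names are mine; the `ssz` call is elided) showing the two encodings produce identical bytes while the array version keeps the buffer off the heap:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeStack stages the length mixin in a stack-allocated array,
// as on the removed (develop) side of the hunk.
func encodeStack(length uint64) [32]byte {
	var buf [32]byte
	binary.LittleEndian.PutUint64(buf[:], length)
	return buf
}

// encodeHeap stages it through bytes.Buffer plus a heap slice,
// as on the added side of the hunk.
func encodeHeap(length uint64) ([]byte, error) {
	rootBuf := new(bytes.Buffer)
	if err := binary.Write(rootBuf, binary.LittleEndian, length); err != nil {
		return nil, err
	}
	out := make([]byte, 32)
	copy(out, rootBuf.Bytes())
	return out, nil
}

func main() {
	a := encodeStack(42)
	b, err := encodeHeap(42)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(a[:], b)) // true: same 32-byte length mixin
}
```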
@@ -588,12 +588,6 @@ func fcReturnsTargetRoot(root [32]byte) func([32]byte, primitives.Epoch) ([32]by
 	}
 }
 
-func fcReturnsDependentRoot() func([32]byte, primitives.Epoch) ([32]byte, error) {
-	return func(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
-		return root, nil
-	}
-}
-
 type mockSignatureCache struct {
 	svCalledForSig map[signatureData]bool
 	svcb           func(sig signatureData) (bool, error)
@@ -1,6 +1,7 @@
 package verification
 
 import (
+	"bytes"
 	"context"
 	"crypto/sha256"
 	"fmt"
@@ -18,7 +19,6 @@ import (
 	"github.com/OffchainLabs/prysm/v7/runtime/logging"
 	"github.com/OffchainLabs/prysm/v7/time/slots"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 var (
@@ -293,57 +293,55 @@ func (dv *RODataColumnsVerifier) ValidProposerSignature(ctx context.Context) (er
 // The returned state is guaranteed to be at the same epoch as the data column's epoch, and have the same randao mix and active
 // validator indices as the data column's parent state advanced to the data column's slot.
 func (dv *RODataColumnsVerifier) getVerifyingState(ctx context.Context, dataColumn blocks.RODataColumn) (state.ReadOnlyBeaconState, error) {
-	dataColumnSlot := dataColumn.Slot()
-	dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
-	if dataColumnEpoch == 0 {
-		return dv.hsp.HeadStateReadOnly(ctx)
-	}
-	parentRoot := dataColumn.ParentRoot()
-	dcDependentRoot, err := dv.fc.DependentRootForEpoch(parentRoot, dataColumnEpoch-1)
-	if err != nil {
-		return nil, err
-	}
 	headRoot, err := dv.hsp.HeadRoot(ctx)
 	if err != nil {
 		return nil, err
 	}
-	headDependentRoot, err := dv.fc.DependentRootForEpoch(bytesutil.ToBytes32(headRoot), dataColumnEpoch-1)
-	if err != nil {
-		return nil, err
-	}
-	if dcDependentRoot == headDependentRoot {
-		headSlot := dv.hsp.HeadSlot()
-		headEpoch := slots.ToEpoch(headSlot)
-		if headEpoch == dataColumnEpoch || headEpoch == dataColumnEpoch-1 {
+	parentRoot := dataColumn.ParentRoot()
+	dataColumnSlot := dataColumn.Slot()
+	dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
+	headSlot := dv.hsp.HeadSlot()
+	headEpoch := slots.ToEpoch(headSlot)
+
+	// Use head if it's the parent
+	if bytes.Equal(parentRoot[:], headRoot) {
+		// If they are in the same epoch, then we can return the head state directly
+		if dataColumnEpoch == headEpoch {
 			return dv.hsp.HeadStateReadOnly(ctx)
 		}
-		if headEpoch+1 < dataColumnEpoch {
-			headState, err := dv.hsp.HeadState(ctx)
-			if err != nil {
-				return nil, err
-			}
-			return transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, dataColumnSlot)
+		// Otherwise, we need to process the head state to the data column's slot
+		headState, err := dv.hsp.HeadState(ctx)
+		if err != nil {
+			return nil, err
 		}
+		return transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, dataColumnSlot)
 	}
+
+	// If head and data column are in the same epoch and head is compatible with the parent's depdendent root, then use head
+	if dataColumnEpoch == headEpoch {
+		headDependent, err := dv.fc.DependentRootForEpoch(bytesutil.ToBytes32(headRoot), dataColumnEpoch)
+		if err != nil {
+			return nil, err
+		}
+		parentDependent, err := dv.fc.DependentRootForEpoch(parentRoot, dataColumnEpoch)
+		if err != nil {
+			return nil, err
+		}
+		if bytes.Equal(headDependent[:], parentDependent[:]) {
+			return dv.hsp.HeadStateReadOnly(ctx)
+		}
+	}
 
-	logrus.WithFields(logrus.Fields{
-		"slot":       dataColumnSlot,
-		"parentRoot": fmt.Sprintf("%#x", parentRoot),
-		"headRoot":   fmt.Sprintf("%#x", headRoot),
-	}).Debug("Replying state for data column verification")
-	targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
+	// Otherwise retrieve the parent state and advance it to the data column's slot
+	parentState, err := dv.sr.StateByRoot(ctx, parentRoot)
 	if err != nil {
 		return nil, err
 	}
-	targetState, err := dv.sr.StateByRoot(ctx, targetRoot)
-	if err != nil {
-		return nil, err
+	parentEpoch := slots.ToEpoch(parentState.Slot())
+	if dataColumnEpoch == parentEpoch {
+		return parentState, nil
 	}
-	targetEpoch := slots.ToEpoch(targetState.Slot())
-	if targetEpoch == dataColumnEpoch || targetEpoch == dataColumnEpoch-1 {
-		return targetState, nil
-	}
-	return transition.ProcessSlotsUsingNextSlotCache(ctx, targetState, parentRoot[:], dataColumnSlot)
+	return transition.ProcessSlotsUsingNextSlotCache(ctx, parentState, parentRoot[:], dataColumnSlot)
 }
 
 func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) (err error) {
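Condensed to its decision structure, the develop-side strategy in this hunk (the `-` lines) picks a state source roughly as follows. This is an illustrative sketch with simplified types, not code from the repository:

```go
package main

import "fmt"

// chooseVerifyingState mirrors the shape of the develop-side logic above:
// when head and the column's parent agree on the dependent root for the
// column's previous epoch, head's proposer shuffling is valid for the column,
// so the head state can be reused (advanced forward when head lags by more
// than an epoch). Otherwise the verifier falls back to the parent's target
// state. All types here are simplified stand-ins.
func chooseVerifyingState(columnEpoch, headEpoch uint64, columnDependent, headDependent [32]byte) string {
	if columnEpoch == 0 {
		return "head state (read-only)"
	}
	if columnDependent == headDependent {
		if headEpoch == columnEpoch || headEpoch == columnEpoch-1 {
			return "head state (read-only)"
		}
		if headEpoch+1 < columnEpoch {
			return "head state, advanced to the column's slot"
		}
	}
	return "target state of the parent, advanced if needed"
}

func main() {
	same := [32]byte{0xaa}
	fmt.Println(chooseVerifyingState(3, 1, same, same)) // head state, advanced to the column's slot
}
```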
@@ -1,7 +1,6 @@
 package verification
 
 import (
-	"context"
 	"reflect"
 	"testing"
 	"time"
@@ -10,7 +9,6 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
 	forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -283,7 +281,7 @@ func TestColumnSlotAboveFinalized(t *testing.T) {
 
 func TestValidProposerSignature(t *testing.T) {
 	const (
-		columnSlot = 97
+		columnSlot = 0
 		blobCount  = 1
 	)
@@ -296,83 +294,59 @@ func TestValidProposerSignature(t *testing.T) {
 	// The signature data does not depend on the data column itself, so we can use the first one.
 	expectedSignatureData := columnToSignatureData(firstColumn)
 
-	// Create a proper Fulu state for verification.
-	// We need enough validators to cover the proposer index.
-	numValidators := max(uint64(firstColumn.ProposerIndex()+1), 64)
-	fuluState, _ := util.DeterministicGenesisStateFulu(t, numValidators)
-
-	// Head state provider that returns the fuluState via HeadStateReadOnly path.
-	headStateWithState := &mockHeadStateProvider{
-		headRoot:          parentRoot[:],
-		headSlot:          columnSlot,
-		headStateReadOnly: fuluState,
-	}
-
-	// Head state provider that will fail (headStateReadOnly is nil).
-	headStateNotFound := &mockHeadStateProvider{
-		headRoot: parentRoot[:],
-		headSlot: columnSlot,
-	}
-
 	testCases := []struct {
-		isError           bool
-		vscbShouldError   bool
-		svcbReturn        bool
-		stateByRooter     StateByRooter
-		headStateProvider *mockHeadStateProvider
-		vscbError         error
-		svcbError         error
-		name              string
+		isError         bool
+		vscbShouldError bool
+		svcbReturn      bool
+		stateByRooter   StateByRooter
+		vscbError       error
+		svcbError       error
+		name            string
 	}{
 		{
-			name:              "cache hit - success",
-			svcbReturn:        true,
-			svcbError:         nil,
-			vscbShouldError:   true,
-			vscbError:         nil,
-			stateByRooter:     &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
-			headStateProvider: headStateWithState,
-			isError:           false,
+			name:            "cache hit - success",
+			svcbReturn:      true,
+			svcbError:       nil,
+			vscbShouldError: true,
+			vscbError:       nil,
+			stateByRooter:   &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
+			isError:         false,
 		},
 		{
-			name:              "cache hit - error",
-			svcbReturn:        true,
-			svcbError:         errors.New("derp"),
-			vscbShouldError:   true,
-			vscbError:         nil,
-			stateByRooter:     &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
-			headStateProvider: headStateWithState,
-			isError:           true,
+			name:            "cache hit - error",
+			svcbReturn:      true,
+			svcbError:       errors.New("derp"),
+			vscbShouldError: true,
+			vscbError:       nil,
+			stateByRooter:   &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
+			isError:         true,
 		},
 		{
-			name:              "cache miss - success",
-			svcbReturn:        false,
-			svcbError:         nil,
-			vscbShouldError:   false,
-			vscbError:         nil,
-			stateByRooter:     sbrForValOverrideWithT(t, firstColumn.ProposerIndex(), validator),
-			headStateProvider: headStateWithState,
-			isError:           false,
+			name:            "cache miss - success",
+			svcbReturn:      false,
+			svcbError:       nil,
+			vscbShouldError: false,
+			vscbError:       nil,
+			stateByRooter:   sbrForValOverrideWithT(t, firstColumn.ProposerIndex(), validator),
+			isError:         false,
 		},
 		{
-			name:              "cache miss - state not found",
-			svcbReturn:        false,
-			svcbError:         nil,
-			vscbShouldError:   false,
-			vscbError:         nil,
-			stateByRooter:     sbrNotFound(t, expectedSignatureData.Parent),
-			headStateProvider: headStateNotFound,
-			isError:           true,
+			name:            "cache miss - state not found",
+			svcbReturn:      false,
+			svcbError:       nil,
+			vscbShouldError: false,
+			vscbError:       nil,
+			stateByRooter:   sbrNotFound(t, expectedSignatureData.Parent),
+			isError:         true,
 		},
 		{
-			name:              "cache miss - signature failure",
-			svcbReturn:        false,
-			svcbError:         nil,
-			vscbShouldError:   false,
-			vscbError:         errors.New("signature, not so good!"),
-			stateByRooter:     sbrForValOverrideWithT(t, firstColumn.ProposerIndex(), validator),
-			headStateProvider: headStateWithState,
-			isError:           true,
+			name:            "cache miss - signature failure",
+			svcbReturn:      false,
+			svcbError:       nil,
+			vscbShouldError: false,
+			vscbError:       errors.New("signature, not so good!"),
+			stateByRooter:   sbrForValOverrideWithT(t, firstColumn.ProposerIndex(), validator),
+			isError:         true,
 		},
 	}
@@ -403,10 +377,9 @@ func TestValidProposerSignature(t *testing.T) {
 				shared: &sharedResources{
 					sc: signatureCache,
 					sr: tc.stateByRooter,
-					hsp: tc.headStateProvider,
+					hsp: &mockHeadStateProvider{},
 					fc: &mockForkchoicer{
-						DependentRootForEpochCB: fcReturnsDependentRoot(),
-						TargetRootForEpochCB:    fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
+						TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
 					},
 				},
 			}
@@ -432,7 +405,7 @@ func TestValidProposerSignature(t *testing.T) {
 
 func TestDataColumnsSidecarParentSeen(t *testing.T) {
 	const (
-		columnSlot = 97
+		columnSlot = 0
 		blobCount  = 1
 	)
@@ -536,7 +509,7 @@ func TestDataColumnsSidecarParentValid(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			const (
-				columnSlot = 97
+				columnSlot = 0
 				blobCount  = 1
 			)
@@ -657,7 +630,7 @@ func TestDataColumnsSidecarDescendsFromFinalized(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			const (
-				columnSlot = 97
+				columnSlot = 0
 				blobCount  = 1
 			)
@@ -720,7 +693,7 @@ func TestDataColumnsSidecarInclusionProven(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			const (
-				columnSlot = 97
+				columnSlot = 0
 				blobCount  = 1
 			)
@@ -775,7 +748,7 @@ func TestDataColumnsSidecarKzgProofVerified(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			const (
-				columnSlot = 97
+				columnSlot = 0
 				blobCount  = 1
 			)
@@ -951,135 +924,3 @@ func TestColumnRequirementSatisfaction(t *testing.T) {
 	require.NoError(t, err)
 }
-
-func TestGetVerifyingStateEdgeCases(t *testing.T) {
-	const (
-		columnSlot = 97 // epoch 3
-		blobCount  = 1
-	)
-
-	parentRoot := [fieldparams.RootLength]byte{}
-	columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
-
-	// Create a proper Fulu state for verification.
-	numValidators := max(uint64(columns[0].ProposerIndex()+1), 64)
-	fuluState, _ := util.DeterministicGenesisStateFulu(t, numValidators)
-
-	t.Run("different dependent roots - uses StateByRoot path", func(t *testing.T) {
-		// Parent and head are on different forks with different dependent roots.
-		// This forces the code to use TargetRootForEpoch -> StateByRoot path.
-		signatureCache := &mockSignatureCache{
-			svcb: func(signatureData signatureData) (bool, error) {
-				return false, nil // Cache miss
-			},
-			vscb: func(signatureData signatureData, _ validatorAtIndexer) (err error) {
-				return nil // Signature valid
-			},
-		}
-
-		// StateByRoot will be called because dependent roots differ
-		stateByRootCalled := false
-		stateByRooter := &mockStateByRooter{
-			sbr: func(_ context.Context, root [32]byte) (state.BeaconState, error) {
-				stateByRootCalled = true
-				return fuluState, nil
-			},
-		}
-
-		initializer := Initializer{
-			shared: &sharedResources{
-				sc: signatureCache,
-				sr: stateByRooter,
-				hsp: &mockHeadStateProvider{
-					headRoot: []byte{0xff}, // Different from parentRoot
-					headSlot: columnSlot,
-				},
-				fc: &mockForkchoicer{
-					// Return different roots for parent vs head to simulate different forks
-					DependentRootForEpochCB: func(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
-						return root, nil // Returns input, so parent [0...] != head [0xff...]
-					},
-					TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
-				},
-			},
-		}
-
-		verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
-		err := verifier.ValidProposerSignature(t.Context())
-		require.NoError(t, err)
-		require.Equal(t, true, stateByRootCalled, "StateByRoot should be called when dependent roots differ")
-	})
-
-	t.Run("same dependent root head far ahead - uses head state with ProcessSlots", func(t *testing.T) {
-		// Parent is ancestor of head on same chain, but head is in epoch 1 while column is in epoch 3.
-		// headEpoch (1) + 1 < dataColumnEpoch (3), so ProcessSlots is called on head state.
-		signatureCache := &mockSignatureCache{
-			svcb: func(signatureData signatureData) (bool, error) {
-				return false, nil // Cache miss
-			},
-			vscb: func(signatureData signatureData, _ validatorAtIndexer) (err error) {
-				return nil // Signature valid
-			},
-		}
-
-		headStateCalled := false
-		initializer := Initializer{
-			shared: &sharedResources{
-				sc: signatureCache,
-				sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}, // Should not be called
-				hsp: &mockHeadStateProvider{
-					headRoot:          parentRoot[:],    // Same as parent
-					headSlot:          32,               // Epoch 1
-					headState:         fuluState.Copy(), // HeadState (not ReadOnly) for ProcessSlots
-					headStateReadOnly: nil,              // Should not use ReadOnly path
-				},
-				fc: &mockForkchoicer{
-					// Return same root for both to simulate same chain
-					DependentRootForEpochCB: func(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
-						return [32]byte{0xaa}, nil // Same for all inputs
-					},
-					TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
-				},
-			},
-		}
-
-		// Wrap to detect HeadState call
-		originalHsp := initializer.shared.hsp.(*mockHeadStateProvider)
-		wrappedHsp := &mockHeadStateProvider{
-			headRoot:  originalHsp.headRoot,
-			headSlot:  originalHsp.headSlot,
-			headState: originalHsp.headState,
-		}
-		initializer.shared.hsp = &headStateCallTracker{
-			mockHeadStateProvider: wrappedHsp,
-			headStateCalled:       &headStateCalled,
-		}
-
-		verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
-		err := verifier.ValidProposerSignature(t.Context())
-		require.NoError(t, err)
-		require.Equal(t, true, headStateCalled, "HeadState should be called when head is far ahead")
-	})
-}
-
-// headStateCallTracker wraps mockHeadStateProvider to track HeadState calls.
-type headStateCallTracker struct {
-	*mockHeadStateProvider
-	headStateCalled *bool
-}
-
-func (h *headStateCallTracker) HeadState(ctx context.Context) (state.BeaconState, error) {
-	*h.headStateCalled = true
-	return h.mockHeadStateProvider.HeadState(ctx)
-}
-
-func (h *headStateCallTracker) HeadRoot(ctx context.Context) ([]byte, error) {
-	return h.mockHeadStateProvider.HeadRoot(ctx)
-}
-
-func (h *headStateCallTracker) HeadSlot() primitives.Slot {
-	return h.mockHeadStateProvider.HeadSlot()
-}
-
-func (h *headStateCallTracker) HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error) {
-	return h.mockHeadStateProvider.HeadStateReadOnly(ctx)
-}
@@ -1,3 +0,0 @@
-### Changed
-
-- post electra we now call attestation data once per slot and use a cache for subsequent requests

@@ -1,3 +0,0 @@
-### Fixed
-
-- fixed broken and old links to actual

@@ -1,2 +0,0 @@
-### Changed
-- Use dependent root and target root to verify data column proposer index.
changelog/pvl-geth-v1.16.8.md (new file, 3 lines)

@@ -0,0 +1,3 @@
+### Security
+
+- Update go-ethereum to v1.16.8
@@ -1,3 +0,0 @@
-### Changed
-
-- Avoid unnessary heap allocation while calling MixInLength
deps.bzl (4 lines changed)

@@ -810,8 +810,8 @@ def prysm_deps():
         patches = [
             "//third_party:com_github_ethereum_go_ethereum_secp256k1.patch",
         ],
-        sum = "h1:qeM4TvbrWK0UC0tgkZ7NiRsmBGwsjqc64BHo20U59UQ=",
-        version = "v1.16.7",
+        sum = "h1:LLLfkZWijhR5m6yrAXbdlTeXoqontH+Ga2f9igY7law=",
+        version = "v1.16.8",
     )
     go_repository(
         name = "com_github_ethereum_go_verkle",
go.mod (2 lines changed)

@@ -15,7 +15,7 @@ require (
 	github.com/dustin/go-humanize v1.0.1
 	github.com/emicklei/dot v1.6.2
 	github.com/ethereum/c-kzg-4844/v2 v2.1.5
-	github.com/ethereum/go-ethereum v1.16.7
+	github.com/ethereum/go-ethereum v1.16.8
 	github.com/fsnotify/fsnotify v1.6.0
 	github.com/ghodss/yaml v1.0.0
 	github.com/go-yaml/yaml v2.1.0+incompatible
go.sum (8 lines changed)

@@ -55,10 +55,10 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506 h1:d/SJkN8/9Ca+1YmuDiUJxAiV4w/a9S8NcsG7GMQSrVI=
-github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506/go.mod h1:6TZI4FU6zT8x6ZfWa1J8YQ2NgW0wLV/W3fHRca8ISBo=
-github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU=
-github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI=
+github.com/OffchainLabs/hashtree v0.2.3 h1:nM8dBAQZzHLzzM14FaAHXnHTAXZIst69v5xWuS48y/c=
+github.com/OffchainLabs/hashtree v0.2.3/go.mod h1:b07+cRZs+eAR8TR57CB9TQlt5Gnl/06Xs76xt/1wq0M=
+github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU=
+github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=

@@ -238,8 +238,8 @@ github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3
 github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
 github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk=
 github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8=
-github.com/ethereum/go-ethereum v1.16.7 h1:qeM4TvbrWK0UC0tgkZ7NiRsmBGwsjqc64BHo20U59UQ=
-github.com/ethereum/go-ethereum v1.16.7/go.mod h1:Fs6QebQbavneQTYcA39PEKv2+zIjX7rPUZ14DER46wk=
+github.com/ethereum/go-ethereum v1.16.8 h1:LLLfkZWijhR5m6yrAXbdlTeXoqontH+Ga2f9igY7law=
+github.com/ethereum/go-ethereum v1.16.8/go.mod h1:Fs6QebQbavneQTYcA39PEKv2+zIjX7rPUZ14DER46wk=
 github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8=
 github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
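The diff does not record how the bump was produced. For a repository that pins the same dependency in both go.mod and deps.bzl, a typical workflow looks like the following sketch; the `//:gazelle` target name is an assumption about this repo's Bazel setup, not something the diff confirms:

```bash
# Bump the Go module dependency and tidy the module graph.
go get github.com/ethereum/go-ethereum@v1.16.8
go mod tidy

# Re-pin the matching go_repository rule (sum/version) in deps.bzl.
# Assumes a Gazelle target named //:gazelle exists in the workspace.
bazel run //:gazelle -- update-repos github.com/ethereum/go-ethereum
```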
@@ -71,9 +71,17 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot primitives.Slot,
 		return
 	}
 
+	committeeIndex := duty.CommitteeIndex
+	postElectra := slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch
+	if postElectra {
+		committeeIndex = 0
+	}
+
-	data, err := v.getAttestationData(ctx, slot, duty.CommitteeIndex)
+	req := &ethpb.AttestationDataRequest{
+		Slot:           slot,
+		CommitteeIndex: committeeIndex,
+	}
+	data, err := v.validatorClient.AttestationData(ctx, req)
 	if err != nil {
 		log.WithError(err).Error("Could not request attestation to sign at slot")
 		if v.emitAccountMetrics {
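The added branch-side code normalizes the committee index to zero after Electra: with EIP-7549 the committee index is no longer part of `AttestationData`, so one response serves every committee in the slot. A minimal standalone sketch of that normalization (types simplified, values illustrative):

```go
package main

import "fmt"

// normalizedCommitteeIndex returns 0 for post-Electra epochs, where
// attestation data is identical across committees (EIP-7549), and the
// caller's committee index otherwise.
func normalizedCommitteeIndex(epoch, electraForkEpoch, committeeIndex uint64) uint64 {
	if epoch >= electraForkEpoch {
		return 0
	}
	return committeeIndex
}

func main() {
	fmt.Println(normalizedCommitteeIndex(10, 8, 5)) // 0: post-Electra
	fmt.Println(normalizedCommitteeIndex(6, 8, 5))  // 5: pre-Electra
}
```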
@@ -65,59 +65,57 @@ var (
 )
 
 type validator struct {
-	logValidatorPerformance            bool
-	distributed                        bool
-	enableAPI                          bool
-	disableDutiesPolling               bool
-	emitAccountMetrics                 bool
-	aggregatedSlotCommitteeIDCacheLock sync.Mutex
-	attLogsLock                        sync.Mutex
-	attSelectionLock                   sync.Mutex
-	highestValidSlotLock               sync.Mutex
-	domainDataLock                     sync.RWMutex
-	blacklistedPubkeysLock             sync.RWMutex
-	prevEpochBalancesLock              sync.RWMutex
-	cachedAttestationDataLock          sync.RWMutex
-	dutiesLock                         sync.RWMutex
-	cachedAttestationData              *ethpb.AttestationData
-	accountsChangedChannel             chan [][fieldparams.BLSPubkeyLength]byte
-	eventsChannel                      chan *eventClient.Event
-	highestValidSlot                   primitives.Slot
-	submittedAggregates                map[submittedAttKey]*submittedAtt
-	graffitiStruct                     *graffiti.Graffiti
-	syncCommitteeStats                 syncCommitteeStats
-	slotFeed                           *event.Feed
-	domainDataCache                    *ristretto.Cache[string, proto.Message]
-	aggregatedSlotCommitteeIDCache     *lru.Cache
-	attSelections                      map[attSelectionKey]iface.BeaconCommitteeSelection
-	interopKeysConfig                  *local.InteropKeymanagerConfig
 	duties                             *ethpb.ValidatorDutiesContainer
-	signedValidatorRegistrations       map[[fieldparams.BLSPubkeyLength]byte]*ethpb.SignedValidatorRegistrationV1
-	proposerSettings                   *proposer.Settings
-	web3SignerConfig                   *remoteweb3signer.SetupConfig
-	ticker                             slots.Ticker
-	genesisTime                        time.Time
+	highestValidSlot                   primitives.Slot
+	slotFeed                           *event.Feed
 	startBalances                      map[[fieldparams.BLSPubkeyLength]byte]uint64
 	prevEpochBalances                  map[[fieldparams.BLSPubkeyLength]byte]uint64
 	blacklistedPubkeys                 map[[fieldparams.BLSPubkeyLength]byte]bool
 	pubkeyToStatus                     map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus
 	wallet                             *wallet.Wallet
 	walletInitializedChan              chan *wallet.Wallet
-	currentHostIndex                   uint64
 	walletInitializedFeed              *event.Feed
-	graffiti                           []byte
+	graffitiStruct                     *graffiti.Graffiti
 	graffitiOrderedIndex               uint64
-	submittedAtts                      map[submittedAttKey]*submittedAtt
-	validatorsRegBatchSize             int
-	beaconNodeHosts                    []string
+	currentHostIndex                   uint64
 	validatorClient                    iface.ValidatorClient
 	chainClient                        iface.ChainClient
 	nodeClient                         iface.NodeClient
 	prysmChainClient                   iface.PrysmChainClient
 	db                                 db.Database
 	km                                 keymanager.IKeymanager
-	accountChangedSub                  event.Subscription
+	ticker                             slots.Ticker
+	beaconNodeHosts                    []string
+	genesisTime                        time.Time
+	graffiti                           []byte
+	web3SignerConfig                   *remoteweb3signer.SetupConfig
+	proposerSettings                   *proposer.Settings
+	signedValidatorRegistrations       map[[fieldparams.BLSPubkeyLength]byte]*ethpb.SignedValidatorRegistrationV1
+	validatorsRegBatchSize             int
+	interopKeysConfig                  *local.InteropKeymanagerConfig
+	attSelections                      map[attSelectionKey]iface.BeaconCommitteeSelection
+	aggregatedSlotCommitteeIDCache     *lru.Cache
+	domainDataCache                    *ristretto.Cache[string, proto.Message]
 	voteStats                          voteStats
+	syncCommitteeStats                 syncCommitteeStats
+	submittedAtts                      map[submittedAttKey]*submittedAtt
+	submittedAggregates                map[submittedAttKey]*submittedAtt
+	logValidatorPerformance            bool
+	emitAccountMetrics                 bool
+	enableAPI                          bool
+	distributed                        bool
+	domainDataLock                     sync.RWMutex
+	attLogsLock                        sync.Mutex
+	aggregatedSlotCommitteeIDCacheLock sync.Mutex
+	highestValidSlotLock               sync.Mutex
+	prevEpochBalancesLock              sync.RWMutex
+	blacklistedPubkeysLock             sync.RWMutex
+	attSelectionLock                   sync.Mutex
+	dutiesLock                         sync.RWMutex
+	disableDutiesPolling               bool
+	accountsChangedChannel             chan [][fieldparams.BLSPubkeyLength]byte
+	eventsChannel                      chan *eventClient.Event
+	accountChangedSub                  event.Subscription
 }
 
 type validatorStatus struct {
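The diff gives no motivation for the field reorder. One common reason to regroup struct fields like this (an assumption here, not stated in the commit) is alignment padding; a toy illustration:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Field order changes a struct's size because of alignment padding:
// interleaving small and large fields forces the compiler to pad.
type interleaved struct {
	a bool
	b uint64
	c bool
}

// Grouping same-sized fields removes most of the padding.
type grouped struct {
	b uint64
	a bool
	c bool
}

func main() {
	fmt.Println(unsafe.Sizeof(interleaved{}), unsafe.Sizeof(grouped{})) // 24 16
}
```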
@@ -979,54 +977,6 @@ func (v *validator) domainData(ctx context.Context, epoch primitives.Epoch, doma
 	return res, nil
 }
-
-// getAttestationData fetches attestation data from the beacon node with caching for post-Electra.
-// Post-Electra, attestation data is identical for all validators in the same slot (committee index is always 0),
-// so we cache it to avoid redundant beacon node requests.
-func (v *validator) getAttestationData(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) (*ethpb.AttestationData, error) {
-	ctx, span := trace.StartSpan(ctx, "validator.getAttestationData")
-	defer span.End()
-
-	postElectra := slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch
-
-	// Pre-Electra: no caching since committee index varies per validator
-	if !postElectra {
-		return v.validatorClient.AttestationData(ctx, &ethpb.AttestationDataRequest{
-			Slot:           slot,
-			CommitteeIndex: committeeIndex,
-		})
-	}
-
-	// Post-Electra: check cache first (committee index is always 0)
-	v.cachedAttestationDataLock.RLock()
-	if v.cachedAttestationData != nil && v.cachedAttestationData.Slot == slot {
-		data := v.cachedAttestationData
-		v.cachedAttestationDataLock.RUnlock()
-		return data, nil
-	}
-	v.cachedAttestationDataLock.RUnlock()
-
-	// Cache miss - acquire write lock and fetch
-	v.cachedAttestationDataLock.Lock()
-	defer v.cachedAttestationDataLock.Unlock()
-
-	// Double-check after acquiring write lock (another goroutine may have filled the cache)
-	if v.cachedAttestationData != nil && v.cachedAttestationData.Slot == slot {
-		return v.cachedAttestationData, nil
-	}
-
-	data, err := v.validatorClient.AttestationData(ctx, &ethpb.AttestationDataRequest{
-		Slot:           slot,
-		CommitteeIndex: 0,
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	v.cachedAttestationData = data
-
-	return data, nil
-}
 
 func (v *validator) logDuties(slot primitives.Slot, currentEpochDuties []*ethpb.ValidatorDuty, nextEpochDuties []*ethpb.ValidatorDuty) {
 	attesterKeys := make([][]string, params.BeaconConfig().SlotsPerEpoch)
 	for i := range attesterKeys {
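The removed function above uses double-checked locking: a cheap read lock first and, only on a miss, a write lock with a re-check, so concurrent callers for the same slot trigger a single upstream request. A minimal standalone sketch of that pattern with simplified stand-in types:

```go
package main

import (
	"fmt"
	"sync"
)

// slotCache sketches the double-checked locking used by getAttestationData:
// readers take the RLock first; on a miss they upgrade to the write lock and
// re-check before fetching, deduplicating concurrent fetches per slot.
type slotCache struct {
	mu   sync.RWMutex
	slot uint64
	data string // "" means empty; a real cache would hold a pointer
}

func (c *slotCache) get(slot uint64, fetch func(uint64) string) string {
	c.mu.RLock()
	if c.data != "" && c.slot == slot {
		d := c.data
		c.mu.RUnlock()
		return d
	}
	c.mu.RUnlock()

	c.mu.Lock()
	defer c.mu.Unlock()
	// Double-check: another goroutine may have filled the cache while we
	// waited for the write lock.
	if c.data != "" && c.slot == slot {
		return c.data
	}
	c.slot = slot
	c.data = fetch(slot)
	return c.data
}

func main() {
	c := &slotCache{}
	var fetches int
	var fetchMu sync.Mutex
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.get(42, func(uint64) string {
				fetchMu.Lock()
				fetches++
				fetchMu.Unlock()
				return "attestation data for slot 42"
			})
		}()
	}
	wg.Wait()
	fmt.Println("upstream fetches:", fetches) // 1: concurrent callers shared one fetch
}
```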
@@ -2977,207 +2977,3 @@ func TestValidator_CheckDependentRoots(t *testing.T) {
 		require.NoError(t, v.checkDependentRoots(ctx, head))
 	})
 }
-
-func TestGetAttestationData_PreElectraNoCaching(t *testing.T) {
-	ctrl := gomock.NewController(t)
-	defer ctrl.Finish()
-
-	client := validatormock.NewMockValidatorClient(ctrl)
-	v := &validator{validatorClient: client}
-
-	// Pre-Electra slot (Electra fork epoch is far in the future by default)
-	preElectraSlot := primitives.Slot(10)
-
-	expectedData := &ethpb.AttestationData{
-		Slot:            preElectraSlot,
-		CommitteeIndex:  5,
-		BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
-		Source:          &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
-		Target:          &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
-	}
-
-	// Each call should go to the beacon node (no caching pre-Electra)
-	client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
-		Slot:           preElectraSlot,
-		CommitteeIndex: 5,
-	}).Return(expectedData, nil)
-	client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
-		Slot:           preElectraSlot,
-		CommitteeIndex: 7,
-	}).Return(expectedData, nil)
-
-	// First call with committee index 5
-	data1, err := v.getAttestationData(context.Background(), preElectraSlot, 5)
-	require.NoError(t, err)
-	require.DeepEqual(t, expectedData, data1)
-
-	// Second call with different committee index 7 - should still call beacon node
-	data2, err := v.getAttestationData(context.Background(), preElectraSlot, 7)
-	require.NoError(t, err)
-	require.DeepEqual(t, expectedData, data2)
-}
-
-func TestGetAttestationData_PostElectraCaching(t *testing.T) {
-	ctrl := gomock.NewController(t)
-	defer ctrl.Finish()
-
-	// Set up Electra fork epoch for this test
-	cfg := params.BeaconConfig().Copy()
-	originalElectraForkEpoch := cfg.ElectraForkEpoch
-	cfg.ElectraForkEpoch = 1
-	params.OverrideBeaconConfig(cfg)
-	defer func() {
-		cfg.ElectraForkEpoch = originalElectraForkEpoch
-		params.OverrideBeaconConfig(cfg)
-	}()
-
-	client := validatormock.NewMockValidatorClient(ctrl)
-	v := &validator{validatorClient: client}
-
-	// Post-Electra slot
-	postElectraSlot := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)
-
-	expectedData := &ethpb.AttestationData{
-		Slot:            postElectraSlot,
-		CommitteeIndex:  0,
-		BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
-		Source:          &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
-		Target:          &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
-	}
-
-	// Only ONE call should go to the beacon node (caching post-Electra)
-	client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
-		Slot:           postElectraSlot,
-		CommitteeIndex: 0,
-	}).Return(expectedData, nil).Times(1)
-
-	// First call - should hit beacon node
-	data1, err := v.getAttestationData(context.Background(), postElectraSlot, 5)
-	require.NoError(t, err)
-	require.DeepEqual(t, expectedData, data1)
-
-	// Second call with different committee index - should use cache
-	data2, err := v.getAttestationData(context.Background(), postElectraSlot, 7)
-	require.NoError(t, err)
-	require.DeepEqual(t, expectedData, data2)
-
-	// Third call - should still use cache
-	data3, err := v.getAttestationData(context.Background(), postElectraSlot, 10)
-	require.NoError(t, err)
-	require.DeepEqual(t, expectedData, data3)
-}
-
-func TestGetAttestationData_PostElectraCacheInvalidatesOnNewSlot(t *testing.T) {
-	ctrl := gomock.NewController(t)
-	defer ctrl.Finish()
-
-	// Set up Electra fork epoch for this test
-	cfg := params.BeaconConfig().Copy()
-	originalElectraForkEpoch := cfg.ElectraForkEpoch
-	cfg.ElectraForkEpoch = 1
-	params.OverrideBeaconConfig(cfg)
-	defer func() {
-		cfg.ElectraForkEpoch = originalElectraForkEpoch
-		params.OverrideBeaconConfig(cfg)
-	}()
-
-	client := validatormock.NewMockValidatorClient(ctrl)
-	v := &validator{validatorClient: client}
-
-	slot1 := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)
-	slot2 := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 6)
-
-	dataSlot1 := &ethpb.AttestationData{
-		Slot:            slot1,
-		CommitteeIndex:  0,
-		BeaconBlockRoot: bytesutil.PadTo([]byte("root1"), 32),
-		Source:          &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
-		Target:          &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
-	}
-	dataSlot2 := &ethpb.AttestationData{
-		Slot:            slot2,
-		CommitteeIndex:  0,
-		BeaconBlockRoot: bytesutil.PadTo([]byte("root2"), 32),
-		Source:          &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
-		Target:          &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
-	}
-
-	// Expect one call per slot
-	client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
-		Slot:           slot1,
-		CommitteeIndex: 0,
-	}).Return(dataSlot1, nil).Times(1)
-	client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
-		Slot:           slot2,
-		CommitteeIndex: 0,
-	}).Return(dataSlot2, nil).Times(1)
-
-	// First slot - should hit beacon node
-	data1, err := v.getAttestationData(context.Background(), slot1, 5)
-	require.NoError(t, err)
-	require.DeepEqual(t, dataSlot1, data1)
-
-	// Same slot - should use cache
-	data1Again, err := v.getAttestationData(context.Background(), slot1, 7)
-	require.NoError(t, err)
-	require.DeepEqual(t, dataSlot1, data1Again)
-
-	// New slot - should invalidate cache and hit beacon node
-	data2, err := v.getAttestationData(context.Background(), slot2, 5)
-	require.NoError(t, err)
-	require.DeepEqual(t, dataSlot2, data2)
-}
-
-func TestGetAttestationData_PostElectraConcurrentAccess(t *testing.T) {
-	ctrl := gomock.NewController(t)
-	defer ctrl.Finish()
-
-	// Set up Electra fork epoch for this test
-	cfg := params.BeaconConfig().Copy()
-	originalElectraForkEpoch := cfg.ElectraForkEpoch
-	cfg.ElectraForkEpoch = 1
-	params.OverrideBeaconConfig(cfg)
-	defer func() {
-		cfg.ElectraForkEpoch = originalElectraForkEpoch
-		params.OverrideBeaconConfig(cfg)
-	}()
-
-	client := validatormock.NewMockValidatorClient(ctrl)
-	v := &validator{validatorClient: client}
-
-	postElectraSlot := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)
-
-	expectedData := &ethpb.AttestationData{
-		Slot:            postElectraSlot,
-		CommitteeIndex:  0,
-		BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
-		Source:          &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
-		Target:          &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
-	}
-
-	// Should only call beacon node once despite concurrent requests
-	client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
-		Slot:           postElectraSlot,
-		CommitteeIndex: 0,
-	}).Return(expectedData, nil).Times(1)
-
-	var wg sync.WaitGroup
-	numGoroutines := 10
-	results := make([]*ethpb.AttestationData, numGoroutines)
-	errs := make([]error, numGoroutines)
-
-	for i := range numGoroutines {
-		wg.Add(1)
-		go func(idx int) {
-			defer wg.Done()
-			results[idx], errs[idx] = v.getAttestationData(context.Background(), postElectraSlot, primitives.CommitteeIndex(idx))
-		}(i)
-	}
-
-	wg.Wait()
-
-	for i := range numGoroutines {
-		require.NoError(t, errs[i])
-		require.DeepEqual(t, expectedData, results[i])
-	}
-}