Compare commits

2 Commits

Author SHA1 Message Date
james-prysm
edcf9e86c5 changelog and ci complaints 2026-01-08 13:52:38 -06:00
james-prysm
2e8d579d31 adding cache for attestation data so we don't call it multiple times 2026-01-08 13:38:06 -06:00
19 changed files with 276 additions and 191 deletions

View File

@@ -0,0 +1,3 @@
### Changed
- Post-Electra, attestation data is requested once per slot and cached for subsequent requests.
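
Post-Electra (EIP-7549) the committee index field inside AttestationData must be zero, so a response depends only on the slot and a single request can serve every committee a validator attests in. A minimal sketch of the request shape, using the ethpb types that appear later in this diff:

    req := &ethpb.AttestationDataRequest{
        Slot:           slot,
        CommitteeIndex: 0, // fixed at 0 post-Electra; varies per duty pre-Electra
    }
    data, err := v.validatorClient.AttestationData(ctx, req)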

View File

@@ -1,2 +0,0 @@
### Added
- Add feature flag to use hashtree instead of gohashtree.

View File

@@ -70,7 +70,6 @@ type Flags struct {
DisableStakinContractCheck bool // Disables check for deposit contract when proposing blocks
IgnoreUnviableAttestations bool // Ignore attestations whose target state is not viable (avoids lagging-node DoS).
EnableHashtree bool // Enables usage of the hashtree library for hashing
EnableVerboseSigVerification bool // EnableVerboseSigVerification specifies whether to verify individual signature if batch verification fails
PrepareAllPayloads bool // PrepareAllPayloads informs the engine to prepare a block on every slot.
@@ -238,10 +237,6 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
logEnabled(enableFullSSZDataLogging)
cfg.EnableFullSSZDataLogging = true
}
if ctx.IsSet(enableHashtree.Name) {
logEnabled(enableHashtree)
cfg.EnableHashtree = true
}
cfg.EnableVerboseSigVerification = true
if ctx.IsSet(disableVerboseSigVerification.Name) {
logEnabled(disableVerboseSigVerification)

View File

@@ -133,10 +133,6 @@ var (
Name: "enable-beacon-rest-api",
Usage: "(Experimental): Enables of the beacon REST API when querying a beacon node.",
}
enableHashtree = &cli.BoolFlag{
Name: "enable-hashtree",
Usage: "(Experimental): Enables the hashtree hashing library.",
}
disableVerboseSigVerification = &cli.BoolFlag{
Name: "disable-verbose-sig-verification",
Usage: "Disables identifying invalid signatures if batch verification fails when processing block.",
@@ -284,7 +280,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
forceHeadFlag,
blacklistRoots,
lowValcountSweep,
enableHashtree,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
func combinedFlags(flags ...[]cli.Flag) []cli.Flag {

View File

@@ -36,6 +36,7 @@ go_library(
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_gohashtree//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
@@ -62,7 +63,6 @@ go_test(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/hash/htr:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -72,5 +72,6 @@ go_test(
"//testing/require:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_gohashtree//:go_default_library",
],
)

View File

@@ -5,10 +5,10 @@ import (
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/container/trie"
"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
"github.com/OffchainLabs/prysm/v7/encoding/ssz"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/pkg/errors"
"github.com/prysmaticlabs/gohashtree"
)
const (
@@ -45,7 +45,7 @@ func VerifyKZGInclusionProof(blob ROBlob) error {
return errInvalidBodyRoot
}
chunks := makeChunk(blob.KzgCommitment)
htr.HashChunks(chunks, chunks)
gohashtree.HashChunks(chunks, chunks)
verified := trie.VerifyMerkleProof(root, chunks[0][:], blob.Index+KZGOffset, blob.CommitmentInclusionProof)
if !verified {
return errInvalidInclusionProof
@@ -182,7 +182,7 @@ func LeavesFromCommitments(commitments [][]byte) [][]byte {
leaves := make([][]byte, len(commitments))
for i, kzg := range commitments {
chunk := makeChunk(kzg)
htr.HashChunks(chunk, chunk)
gohashtree.HashChunks(chunk, chunk)
leaves[i] = chunk[0][:]
}
return leaves
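
A self-contained sketch of the leaf computation above, assuming makeChunk pads the 48-byte KZG commitment into two 32-byte chunks as this file does: gohashtree.HashChunks hashes adjacent chunk pairs in place, leaving the Merkle leaf in chunks[0]. The commitmentLeaf name is illustrative, not Prysm API.

    package main

    import (
        "fmt"

        "github.com/prysmaticlabs/gohashtree"
    )

    func commitmentLeaf(commitment [48]byte) [32]byte {
        chunks := make([][32]byte, 2)
        copy(chunks[0][:], commitment[:32])
        copy(chunks[1][:], commitment[32:]) // upper 16 bytes, zero-padded to 32
        // Pairwise hash in place: chunks[0] = sha256(chunks[0] || chunks[1]).
        gohashtree.HashChunks(chunks, chunks)
        return chunks[0]
    }

    func main() {
        var c [48]byte
        fmt.Printf("%x\n", commitmentLeaf(c))
    }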

View File

@@ -8,10 +8,10 @@ import (
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/container/trie"
"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/prysmaticlabs/gohashtree"
)
func Test_MerkleProofKZGCommitment_Altair(t *testing.T) {
@@ -108,7 +108,7 @@ func Test_MerkleProofKZGCommitment(t *testing.T) {
require.Equal(t, true, trie.VerifyMerkleProof(root[:], commitmentsRoot[:], kzgPosition, topProof[:len(topProof)-1]))
chunk := makeChunk(kzgs[index])
htr.HashChunks(chunk, chunk)
gohashtree.HashChunks(chunk, chunk)
require.Equal(t, true, trie.VerifyMerkleProof(root[:], chunk[0][:], uint64(index+KZGOffset), proof))
}

View File

@@ -5,12 +5,7 @@ go_library(
srcs = ["hashtree.go"],
importpath = "github.com/OffchainLabs/prysm/v7/crypto/hash/htr",
visibility = ["//visibility:public"],
deps = [
"//config/features:go_default_library",
"@com_github_offchainlabs_hashtree//:go_default_library",
"@com_github_prysmaticlabs_gohashtree//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
deps = ["@com_github_prysmaticlabs_gohashtree//:go_default_library"],
)
go_test(
@@ -18,8 +13,5 @@ go_test(
size = "small",
srcs = ["hashtree_test.go"],
embed = [":go_default_library"],
deps = [
"//config/features:go_default_library",
"//testing/require:go_default_library",
],
deps = ["//testing/require:go_default_library"],
)

View File

@@ -4,38 +4,14 @@ import (
"runtime"
"sync"
"github.com/OffchainLabs/hashtree"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/prysmaticlabs/gohashtree"
log "github.com/sirupsen/logrus"
)
const minSliceSizeToParallelize = 5000
// Hash hashes chunks pairwise into digests using the configured hashing library.
// It performs input validation (odd chunks, digest length).
func Hash(digests, chunks [][32]byte) error {
if features.Get().EnableHashtree {
return hashtree.Hash(digests, chunks)
}
return gohashtree.Hash(digests, chunks)
}
// HashChunks hashes chunks pairwise into digests without error checking.
// The caller must ensure inputs are valid (even chunks, sufficient digest space).
func HashChunks(digests, chunks [][32]byte) {
if features.Get().EnableHashtree {
if err := hashtree.Hash(digests, chunks); err != nil {
log.WithError(err).Error("Could not hash chunks")
}
} else {
gohashtree.HashChunks(digests, chunks)
}
}
func hashParallel(inputList [][32]byte, outputList [][32]byte, wg *sync.WaitGroup) {
defer wg.Done()
err := Hash(outputList, inputList)
err := gohashtree.Hash(outputList, inputList)
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
@@ -49,7 +25,7 @@ func hashParallel(inputList [][32]byte, outputList [][32]byte, wg *sync.WaitGrou
func VectorizedSha256(inputList [][32]byte) [][32]byte {
outputList := make([][32]byte, len(inputList)/2)
if len(inputList) < minSliceSizeToParallelize {
err := Hash(outputList, inputList)
err := gohashtree.Hash(outputList, inputList)
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
@@ -62,7 +38,7 @@ func VectorizedSha256(inputList [][32]byte) [][32]byte {
for j := range n {
go hashParallel(inputList[j*2*groupSize:(j+1)*2*groupSize], outputList[j*groupSize:], &wg)
}
err := Hash(outputList[n*groupSize:], inputList[n*2*groupSize:])
err := gohashtree.Hash(outputList[n*groupSize:], inputList[n*2*groupSize:])
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
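
For reference, a minimal sketch of the gohashtree calls this file now uses directly: Hash validates lengths and returns an error, writing one parent digest per pair of input chunks (one level of a Merkle tree).

    package main

    import (
        "fmt"

        "github.com/prysmaticlabs/gohashtree"
    )

    func main() {
        leaves := make([][32]byte, 4)  // must be an even count
        parents := make([][32]byte, 2) // needs room for len(leaves)/2 digests
        if err := gohashtree.Hash(parents, leaves); err != nil {
            panic(err) // odd chunk count or undersized digest slice
        }
        fmt.Printf("%x\n%x\n", parents[0], parents[1])
    }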

View File

@@ -4,7 +4,6 @@ import (
"sync"
"testing"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
@@ -24,25 +23,3 @@ func Test_VectorizedSha256(t *testing.T) {
require.Equal(t, r, hash2[i])
}
}
func Test_VectorizedSha256_hashtree_enabled(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableHashtree: true,
})
defer resetCfg()
largeSlice := make([][32]byte, 32*minSliceSizeToParallelize)
secondLargeSlice := make([][32]byte, 32*minSliceSizeToParallelize)
hash1 := make([][32]byte, 16*minSliceSizeToParallelize)
wg := sync.WaitGroup{}
wg.Go(func() {
tempHash := VectorizedSha256(largeSlice)
copy(hash1, tempHash)
})
wg.Wait()
hash2 := VectorizedSha256(secondLargeSlice)
require.Equal(t, len(hash1), len(hash2))
for i, r := range hash1 {
require.Equal(t, r, hash2[i])
}
}

View File

@@ -2449,15 +2449,6 @@ def prysm_deps():
sum = "h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=",
version = "v1.4.11",
)
go_repository(
name = "com_github_offchainlabs_hashtree",
build_file_generation = "off",
importpath = "github.com/OffchainLabs/hashtree",
patch_args = ["-p1"],
patches = ["//third_party:com_github_offchainlabs_hashtree.patch"],
sum = "h1:R6DAjgAUwwfgji3jEI4WUxtZ3eJ+FbRHjW21UPMBJyo=",
version = "v0.2.2",
)
go_repository(
name = "com_github_oklog_oklog",
importpath = "github.com/oklog/oklog",

View File

@@ -21,6 +21,7 @@ go_library(
"@com_github_minio_sha256_simd//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_gohashtree//:go_default_library",
],
)

View File

@@ -6,6 +6,7 @@ import (
"github.com/OffchainLabs/prysm/v7/container/trie"
"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/gohashtree"
)
var errInvalidNilSlice = errors.New("invalid empty slice")
@@ -181,10 +182,10 @@ func MerkleizeListSSZ[T Hashable](elements []T, limit uint64) ([32]byte, error)
chunks := make([][32]byte, 2)
chunks[0] = body
binary.LittleEndian.PutUint64(chunks[1][:], uint64(len(elements)))
if err = htr.Hash(chunks, chunks); err != nil {
if err := gohashtree.Hash(chunks, chunks); err != nil {
return [32]byte{}, err
}
return chunks[0], nil
}
// MerkleizeByteSliceSSZ hashes a byteslice by chunkifying it and returning the
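
The list hunk above is the SSZ mix_in_length step: the merkleized vector root is hashed with the list length, encoded little-endian in a zero-padded 32-byte chunk. A runnable sketch checking gohashtree against crypto/sha256 on the 64-byte preimage:

    package main

    import (
        "crypto/sha256"
        "encoding/binary"
        "fmt"

        "github.com/prysmaticlabs/gohashtree"
    )

    func main() {
        var body [32]byte // stand-in for the merkleized vector root
        chunks := make([][32]byte, 2)
        chunks[0] = body
        binary.LittleEndian.PutUint64(chunks[1][:], 3) // list length = 3

        // Capture the 64-byte preimage before Hash overwrites chunks[0].
        preimage := append(append([]byte{}, chunks[0][:]...), chunks[1][:]...)
        if err := gohashtree.Hash(chunks, chunks); err != nil {
            panic(err)
        }
        fmt.Println(chunks[0] == sha256.Sum256(preimage)) // true
    }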

go.mod
View File

@@ -6,7 +6,6 @@ require (
github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240516070431-7828990cad7d
github.com/MariusVanDerWijden/tx-fuzz v1.4.0
github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506
github.com/OffchainLabs/hashtree v0.2.3
github.com/aristanetworks/goarista v0.0.0-20200805130819-fd197cf57d96
github.com/bazelbuild/rules_go v0.23.2
github.com/btcsuite/btcd/btcec/v2 v2.3.4

go.sum
View File

@@ -59,8 +59,6 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506 h1:d/SJkN8/9Ca+1YmuDiUJxAiV4w/a9S8NcsG7GMQSrVI=
github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506/go.mod h1:6TZI4FU6zT8x6ZfWa1J8YQ2NgW0wLV/W3fHRca8ISBo=
github.com/OffchainLabs/hashtree v0.2.3 h1:nM8dBAQZzHLzzM14FaAHXnHTAXZIst69v5xWuS48y/c=
github.com/OffchainLabs/hashtree v0.2.3/go.mod h1:b07+cRZs+eAR8TR57CB9TQlt5Gnl/06Xs76xt/1wq0M=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=

View File

@@ -1,90 +0,0 @@
diff -urN a/BUILD.bazel b/BUILD.bazel
--- a/BUILD.bazel 1969-12-31 18:00:00.000000000 -0600
+++ b/BUILD.bazel 2025-01-05 12:00:00.000000000 -0600
@@ -0,0 +1,86 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "bindings.go",
+ "sha256_1_generic.go",
+ ] + select({
+ "@io_bazel_rules_go//go/platform:linux_amd64": [
+ "bindings_amd64.go",
+ "wrapper_linux_amd64.s",
+ ":hashtree_amd64_syso",
+ ],
+ "@io_bazel_rules_go//go/platform:windows_amd64": [
+ "bindings_amd64.go",
+ "wrapper_windows_amd64.s",
+ ":hashtree_amd64_syso",
+ ],
+ "@io_bazel_rules_go//go/platform:linux_arm64": [
+ "bindings_arm64.go",
+ "wrapper_arm64.s",
+ ":hashtree_arm64_syso",
+ ],
+ "@io_bazel_rules_go//go/platform:darwin_arm64": [
+ "bindings_arm64.go",
+ "wrapper_arm64.s",
+ ":hashtree_arm64_syso",
+ ],
+ "@io_bazel_rules_go//go/platform:darwin_amd64": [
+ "bindings_darwin_amd64.go",
+ ],
+ "//conditions:default": [],
+ }),
+ importpath = "github.com/OffchainLabs/hashtree",
+ visibility = ["//visibility:public"],
+ deps = ["@com_github_klauspost_cpuid_v2//:go_default_library"],
+)
+
+genrule(
+ name = "hashtree_arm64_syso",
+ srcs = [":hashtree_arm64"],
+ outs = ["hashtree_arm64.syso"],
+ cmd = "cp $(location :hashtree_arm64) $@",
+)
+
+genrule(
+ name = "hashtree_amd64_syso",
+ srcs = [":hashtree_amd64"],
+ outs = ["hashtree_amd64.syso"],
+ cmd = "cp $(location :hashtree_amd64) $@",
+)
+
+cc_library(
+ name = "hashtree_arm64",
+ srcs = [
+ "src/hashtree.c",
+ "src/sha256_generic.c",
+ "src/sha256_armv8_crypto.S",
+ "src/sha256_armv8_neon_x1.S",
+ "src/sha256_armv8_neon_x4.S",
+ ],
+ hdrs = ["src/hashtree.h"],
+ copts = ["-O3", "-Wall"],
+ linkstatic = True,
+ strip_include_prefix = "src",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "hashtree_amd64",
+ srcs = [
+ "src/hashtree.c",
+ "src/sha256_generic.c",
+ "src/sha256_avx_x1.S",
+ "src/sha256_avx_x4.S",
+ "src/sha256_avx_x8.S",
+ "src/sha256_avx_x16.S",
+ "src/sha256_shani.S",
+ "src/sha256_sse_x1.S",
+ ],
+ hdrs = ["src/hashtree.h"],
+ copts = ["-O3", "-Wall", "-fno-integrated-as"],
+ linkstatic = True,
+ strip_include_prefix = "src",
+ visibility = ["//visibility:public"],
+)

View File

@@ -71,17 +71,9 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot primitives.Slot,
return
}
committeeIndex := duty.CommitteeIndex
postElectra := slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch
if postElectra {
committeeIndex = 0
}
req := &ethpb.AttestationDataRequest{
Slot: slot,
CommitteeIndex: committeeIndex,
}
data, err := v.validatorClient.AttestationData(ctx, req)
data, err := v.getAttestationData(ctx, slot, duty.CommitteeIndex)
if err != nil {
log.WithError(err).Error("Could not request attestation to sign at slot")
if v.emitAccountMetrics {

View File

@@ -112,6 +112,9 @@ type validator struct {
blacklistedPubkeysLock sync.RWMutex
attSelectionLock sync.Mutex
dutiesLock sync.RWMutex
attestationDataCacheLock sync.RWMutex
attestationDataCache *ethpb.AttestationData
attestationDataCacheSlot primitives.Slot
disableDutiesPolling bool
accountsChangedChannel chan [][fieldparams.BLSPubkeyLength]byte
eventsChannel chan *eventClient.Event
@@ -977,6 +980,55 @@ func (v *validator) domainData(ctx context.Context, epoch primitives.Epoch, doma
return res, nil
}
// getAttestationData fetches attestation data from the beacon node with caching for post-Electra.
// Post-Electra, attestation data is identical for all validators in the same slot (committee index is always 0),
// so we cache it to avoid redundant beacon node requests.
func (v *validator) getAttestationData(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) (*ethpb.AttestationData, error) {
ctx, span := trace.StartSpan(ctx, "validator.getAttestationData")
defer span.End()
postElectra := slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch
// Pre-Electra: no caching since committee index varies per validator
if !postElectra {
return v.validatorClient.AttestationData(ctx, &ethpb.AttestationDataRequest{
Slot: slot,
CommitteeIndex: committeeIndex,
})
}
// Post-Electra: check cache first (committee index is always 0)
v.attestationDataCacheLock.RLock()
if v.attestationDataCacheSlot == slot && v.attestationDataCache != nil {
data := v.attestationDataCache
v.attestationDataCacheLock.RUnlock()
return data, nil
}
v.attestationDataCacheLock.RUnlock()
// Cache miss - acquire write lock and fetch
v.attestationDataCacheLock.Lock()
defer v.attestationDataCacheLock.Unlock()
// Double-check after acquiring write lock (another goroutine may have filled the cache)
if v.attestationDataCacheSlot == slot && v.attestationDataCache != nil {
return v.attestationDataCache, nil
}
data, err := v.validatorClient.AttestationData(ctx, &ethpb.AttestationDataRequest{
Slot: slot,
CommitteeIndex: 0,
})
if err != nil {
return nil, err
}
v.attestationDataCache = data
v.attestationDataCacheSlot = slot
return data, nil
}
func (v *validator) logDuties(slot primitives.Slot, currentEpochDuties []*ethpb.ValidatorDuty, nextEpochDuties []*ethpb.ValidatorDuty) {
attesterKeys := make([][]string, params.BeaconConfig().SlotsPerEpoch)
for i := range attesterKeys {
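
The locking in getAttestationData above is classic double-checked caching: readers take the cheap RLock, and on a miss the writer re-checks under the exclusive lock so concurrent misses trigger only one fetch. A generic, standalone sketch of the same shape (slotCache is illustrative, not Prysm API):

    package main

    import (
        "fmt"
        "sync"
    )

    type slotCache[V any] struct {
        mu   sync.RWMutex
        slot uint64
        val  *V
    }

    func (c *slotCache[V]) get(slot uint64, fetch func() (*V, error)) (*V, error) {
        c.mu.RLock()
        if c.slot == slot && c.val != nil {
            v := c.val
            c.mu.RUnlock()
            return v, nil
        }
        c.mu.RUnlock()

        c.mu.Lock()
        defer c.mu.Unlock()
        // Double-check: another goroutine may have filled the cache while we
        // waited for the write lock.
        if c.slot == slot && c.val != nil {
            return c.val, nil
        }
        v, err := fetch()
        if err != nil {
            return nil, err
        }
        c.val, c.slot = v, slot
        return v, nil
    }

    func main() {
        c := &slotCache[string]{}
        calls := 0
        fetch := func() (*string, error) { calls++; s := "data"; return &s, nil }
        c.get(42, fetch)
        c.get(42, fetch) // cache hit; fetch is not called again
        fmt.Println(calls) // 1
    }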

View File

@@ -2977,3 +2977,207 @@ func TestValidator_CheckDependentRoots(t *testing.T) {
require.NoError(t, v.checkDependentRoots(ctx, head))
})
}
func TestGetAttestationData_PreElectraNoCaching(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}
// Pre-Electra slot (Electra fork epoch is far in the future by default)
preElectraSlot := primitives.Slot(10)
expectedData := &ethpb.AttestationData{
Slot: preElectraSlot,
CommitteeIndex: 5,
BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
// Each call should go to the beacon node (no caching pre-Electra)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: preElectraSlot,
CommitteeIndex: 5,
}).Return(expectedData, nil)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: preElectraSlot,
CommitteeIndex: 7,
}).Return(expectedData, nil)
// First call with committee index 5
data1, err := v.getAttestationData(context.Background(), preElectraSlot, 5)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data1)
// Second call with different committee index 7 - should still call beacon node
data2, err := v.getAttestationData(context.Background(), preElectraSlot, 7)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data2)
}
func TestGetAttestationData_PostElectraCaching(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
// Set up Electra fork epoch for this test
cfg := params.BeaconConfig().Copy()
originalElectraForkEpoch := cfg.ElectraForkEpoch
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
defer func() {
cfg.ElectraForkEpoch = originalElectraForkEpoch
params.OverrideBeaconConfig(cfg)
}()
client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}
// Post-Electra slot
postElectraSlot := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)
expectedData := &ethpb.AttestationData{
Slot: postElectraSlot,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
// Only ONE call should go to the beacon node (caching post-Electra)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: postElectraSlot,
CommitteeIndex: 0,
}).Return(expectedData, nil).Times(1)
// First call - should hit beacon node
data1, err := v.getAttestationData(context.Background(), postElectraSlot, 5)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data1)
// Second call with different committee index - should use cache
data2, err := v.getAttestationData(context.Background(), postElectraSlot, 7)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data2)
// Third call - should still use cache
data3, err := v.getAttestationData(context.Background(), postElectraSlot, 10)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data3)
}
func TestGetAttestationData_PostElectraCacheInvalidatesOnNewSlot(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
// Set up Electra fork epoch for this test
cfg := params.BeaconConfig().Copy()
originalElectraForkEpoch := cfg.ElectraForkEpoch
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
defer func() {
cfg.ElectraForkEpoch = originalElectraForkEpoch
params.OverrideBeaconConfig(cfg)
}()
client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}
slot1 := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)
slot2 := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 6)
dataSlot1 := &ethpb.AttestationData{
Slot: slot1,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root1"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
dataSlot2 := &ethpb.AttestationData{
Slot: slot2,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root2"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
// Expect one call per slot
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: slot1,
CommitteeIndex: 0,
}).Return(dataSlot1, nil).Times(1)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: slot2,
CommitteeIndex: 0,
}).Return(dataSlot2, nil).Times(1)
// First slot - should hit beacon node
data1, err := v.getAttestationData(context.Background(), slot1, 5)
require.NoError(t, err)
require.DeepEqual(t, dataSlot1, data1)
// Same slot - should use cache
data1Again, err := v.getAttestationData(context.Background(), slot1, 7)
require.NoError(t, err)
require.DeepEqual(t, dataSlot1, data1Again)
// New slot - should invalidate cache and hit beacon node
data2, err := v.getAttestationData(context.Background(), slot2, 5)
require.NoError(t, err)
require.DeepEqual(t, dataSlot2, data2)
}
func TestGetAttestationData_PostElectraConcurrentAccess(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
// Set up Electra fork epoch for this test
cfg := params.BeaconConfig().Copy()
originalElectraForkEpoch := cfg.ElectraForkEpoch
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
defer func() {
cfg.ElectraForkEpoch = originalElectraForkEpoch
params.OverrideBeaconConfig(cfg)
}()
client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}
postElectraSlot := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)
expectedData := &ethpb.AttestationData{
Slot: postElectraSlot,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
// Should only call beacon node once despite concurrent requests
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: postElectraSlot,
CommitteeIndex: 0,
}).Return(expectedData, nil).Times(1)
var wg sync.WaitGroup
numGoroutines := 10
results := make([]*ethpb.AttestationData, numGoroutines)
errs := make([]error, numGoroutines)
for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(idx int) {
defer wg.Done()
results[idx], errs[idx] = v.getAttestationData(context.Background(), postElectraSlot, primitives.CommitteeIndex(idx))
}(i)
}
wg.Wait()
for i := 0; i < numGoroutines; i++ {
require.NoError(t, errs[i])
require.DeepEqual(t, expectedData, results[i])
}
}