Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 05:47:59 -05:00.

Compare commits: fixE2E...v5.0.4-rc. (43 commits)
| SHA1 |
|---|
| 26c0178ce4 |
| b6f93e5305 |
| 7ad870fdba |
| 8b9651606d |
| a2a77b5bfc |
| b807afbd7d |
| 6df83ebef7 |
| 7b4d23801a |
| 63d31cfa1e |
| f947fefa19 |
| 7a55dc6cc3 |
| 024163b923 |
| 8eb964c3e5 |
| 69c60a6611 |
| 7de5381af8 |
| 25dfed5cf4 |
| b9f0fc52ea |
| 3a186a1347 |
| c2117e5747 |
| 0a9e99ce74 |
| c3486c58cb |
| 5ddd5cb3fb |
| f079db62bd |
| a5425d9c97 |
| ea5c14affb |
| f4f00e8f35 |
| 1fb4c38503 |
| d041d8da32 |
| 11684392d1 |
| 276b97530f |
| d5daf49a9a |
| feb16ae4aa |
| 219301339c |
| aec349f75a |
| 5f909caedf |
| ba6dff3adb |
| 8cd05f098b |
| 425f5387fa |
| f2ce115ade |
| 090a3e1ded |
| c0acb7d352 |
| 0d6070e6fc |
| bd00f851f0 |
@@ -12,8 +12,7 @@
 build:remote-cache --remote_download_minimal
 build:remote-cache --remote_build_event_upload=minimal
 build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
-# Does not work with rules_oci. See https://github.com/bazel-contrib/rules_oci/issues/292
-#build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
+build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
 build:remote-cache --remote_local_fallback
 build:remote-cache --experimental_remote_cache_async
 build:remote-cache --experimental_remote_merkle_tree_cache
18 WORKSPACE
@@ -29,23 +29,7 @@ http_archive(

 load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")

-# Temporarily use a nightly build until 0.12.0 is released.
-# See: https://github.com/prysmaticlabs/prysm/issues/13130
-zig_toolchains(
-    host_platform_sha256 = {
-        "linux-aarch64": "45afb8e32adde825165f4f293fcea9ecea503f7f9ec0e9bf4435afe70e67fb70",
-        "linux-x86_64": "f136c6a8a0f6adcb057d73615fbcd6f88281b3593f7008d5f7ed514ff925c02e",
-        "macos-aarch64": "05d995853c05243151deff47b60bdc2674f1e794a939eaeca0f42312da031cee",
-        "macos-x86_64": "721754ba5a50f31e8a1f0e1a74cace26f8246576878ac4a8591b0ee7b6db1fc1",
-        "windows-x86_64": "93f5248b2ea8c5ee8175e15b1384e133edc1cd49870b3ea259062a2e04164343",
-    },
-    url_formats = [
-        "https://ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
-        "https://mirror.bazel.build/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
-        "https://prysmaticlabs.com/mirror/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
-    ],
-    version = "0.12.0-dev.1349+fa022d1ec",
-)
+zig_toolchains()

 # Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
 # For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
@@ -1,11 +1,24 @@
-load("@prysm//tools/go:def.bzl", "go_library")
+load("@prysm//tools/go:def.bzl", "go_library", "go_test")

 go_library(
     name = "go_default_library",
     srcs = [
         "constants.go",
         "headers.go",
+        "jwt.go",
     ],
     importpath = "github.com/prysmaticlabs/prysm/v5/api",
     visibility = ["//visibility:public"],
+    deps = [
+        "//crypto/rand:go_default_library",
+        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
+        "@com_github_pkg_errors//:go_default_library",
+    ],
 )
+
+go_test(
+    name = "go_default_test",
+    srcs = ["jwt_test.go"],
+    embed = [":go_default_library"],
+    deps = ["//testing/require:go_default_library"],
+)
@@ -36,19 +36,19 @@ func (n *NodeHealthTracker) IsHealthy() bool {
 }

 func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
-	n.RLock()
+	n.Lock()
+	defer n.Unlock()

 	newStatus := n.node.IsHealthy(ctx)
 	if n.isHealthy == nil {
 		n.isHealthy = &newStatus
 	}
-	isStatusChanged := newStatus != *n.isHealthy
-	n.RUnlock()

+	isStatusChanged := newStatus != *n.isHealthy
 	if isStatusChanged {
-		n.Lock()
-		// Double-check the condition to ensure it hasn't changed since the first check.
 		// Update the health status
 		n.isHealthy = &newStatus
-		n.Unlock() // It's better to unlock as soon as the protected section is over.
 		// Send the new status to the health channel
 		n.healthChan <- newStatus
 	}
 	return newStatus
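The CheckHealth hunk above replaces a read-lock-then-upgrade sequence with a single write lock held across the whole check, closing the window in which two goroutines could both compare against a stale status before either stored the new one. A minimal, self-contained sketch of the resulting shape; the `nodeClient` interface and `staticNode` stub are invented here for illustration and are not Prysm's real types:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// nodeClient stands in for the real health-check dependency.
type nodeClient interface {
	IsHealthy(ctx context.Context) bool
}

// staticNode is a trivial nodeClient whose health never changes.
type staticNode bool

func (s staticNode) IsHealthy(context.Context) bool { return bool(s) }

type NodeHealthTracker struct {
	sync.RWMutex
	isHealthy  *bool
	node       nodeClient
	healthChan chan bool
}

// CheckHealth holds the write lock for the entire read-compare-update
// sequence, so no other goroutine can interleave between reading
// *n.isHealthy and storing the new status (the race the diff removes).
func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
	n.Lock()
	defer n.Unlock()

	newStatus := n.node.IsHealthy(ctx)
	if n.isHealthy == nil {
		n.isHealthy = &newStatus
	}
	isStatusChanged := newStatus != *n.isHealthy
	if isStatusChanged {
		n.isHealthy = &newStatus
		n.healthChan <- newStatus // buffered in this sketch, so the send cannot block
	}
	return newStatus
}

func main() {
	t := &NodeHealthTracker{node: staticNode(true), healthChan: make(chan bool, 1)}
	fmt.Println(t.CheckHealth(context.Background())) // true
}
```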
@@ -99,9 +99,9 @@ func TestNodeHealth_Concurrency(t *testing.T) {
 	for i := 0; i < numGoroutines; i++ {
 		go func() {
 			defer wg.Done()
-			client.EXPECT().IsHealthy(gomock.Any()).Return(false)
+			client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1)
 			n.CheckHealth(context.Background())
-			client.EXPECT().IsHealthy(gomock.Any()).Return(true)
+			client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
 			n.CheckHealth(context.Background())
 		}()
 	}
@@ -4,4 +4,6 @@ const (
 	WebUrlPrefix        = "/v2/validator/"
 	WebApiUrlPrefix     = "/api/v2/validator/"
 	KeymanagerApiPrefix = "/eth/v1"
+	SystemLogsPrefix    = "health/logs"
+	AuthTokenFileName   = "auth-token"
 )
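The two added constants pair with the new auth-token handling in api/jwt.go below. A small sketch of hypothetical call sites, with the constants copied locally so it runs standalone; the wallet directory and the `/keystores` route composition are illustrative assumptions, not code from this diff:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// Local copies of the constants from the hunk above, so this compiles alone.
const (
	KeymanagerApiPrefix = "/eth/v1"
	AuthTokenFileName   = "auth-token"
)

func main() {
	// Hypothetical uses: composing a keymanager route and locating a
	// validator client's auth-token file under some wallet directory.
	route := KeymanagerApiPrefix + "/keystores"
	tokenPath := filepath.Join("/home/user/.eth2validators", AuthTokenFileName)
	fmt.Println(route, tokenPath)
}
```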
32 api/jwt.go (new file)
@@ -0,0 +1,32 @@
package api

import (
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/crypto/rand"
)

// GenerateRandomHexString generates a random hex string that follows the standards for jwt token
// used for beacon node -> execution client
// used for web client -> validator client
func GenerateRandomHexString() (string, error) {
	secret := make([]byte, 32)
	randGen := rand.NewGenerator()
	n, err := randGen.Read(secret)
	if err != nil {
		return "", err
	} else if n != 32 {
		return "", errors.New("rand: unexpected length")
	}
	return hexutil.Encode(secret), nil
}

// ValidateAuthToken validating auth token for web
func ValidateAuthToken(token string) error {
	b, err := hexutil.Decode(token)
	// token should be hex-encoded and at least 256 bits
	if err != nil || len(b) < 32 {
		return errors.New("invalid auth token: token should be hex-encoded and at least 256 bits")
	}
	return nil
}
13 api/jwt_test.go (new file)
@@ -0,0 +1,13 @@
package api

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestGenerateRandomHexString(t *testing.T) {
	token, err := GenerateRandomHexString()
	require.NoError(t, err)
	require.NoError(t, ValidateAuthToken(token))
}
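api/jwt.go and its test are small enough to exercise end to end. The sketch below mirrors their logic using only the standard library, so it runs without the Prysm modules; `hexutil.Encode`/`hexutil.Decode` are approximated by a 0x-prefixed hex encoding, which is the format those go-ethereum helpers produce and expect:

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"errors"
	"fmt"
)

// generateRandomHexString mirrors api.GenerateRandomHexString: 32 random
// bytes, hex-encoded with a 0x prefix.
func generateRandomHexString() (string, error) {
	secret := make([]byte, 32)
	if _, err := rand.Read(secret); err != nil {
		return "", err
	}
	return "0x" + hex.EncodeToString(secret), nil
}

// validateAuthToken mirrors api.ValidateAuthToken: the token must be
// 0x-prefixed hex and decode to at least 32 bytes (256 bits).
func validateAuthToken(token string) error {
	if len(token) < 2 || token[:2] != "0x" {
		return errors.New("invalid auth token: missing 0x prefix")
	}
	b, err := hex.DecodeString(token[2:])
	if err != nil || len(b) < 32 {
		return errors.New("invalid auth token: token should be hex-encoded and at least 256 bits")
	}
	return nil
}

func main() {
	token, err := generateRandomHexString()
	if err != nil {
		panic(err)
	}
	fmt.Println(token, validateAuthToken(token) == nil) // 0x… true
}
```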
@@ -137,7 +137,7 @@ go_test(
 		"//async/event:go_default_library",
 		"//beacon-chain/blockchain/testing:go_default_library",
 		"//beacon-chain/cache:go_default_library",
-		"//beacon-chain/cache/depositcache:go_default_library",
+		"//beacon-chain/cache/depositsnapshot:go_default_library",
 		"//beacon-chain/core/blocks:go_default_library",
 		"//beacon-chain/core/feed/state:go_default_library",
 		"//beacon-chain/core/helpers:go_default_library",
@@ -11,7 +11,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
 	f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
 	doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
-	forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
 	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -399,14 +398,6 @@ func (s *Service) InForkchoice(root [32]byte) bool {
 	return s.cfg.ForkChoiceStore.HasNode(root)
 }

-// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
-// chain known to forkchoice
-func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
-	s.cfg.ForkChoiceStore.RLock()
-	defer s.cfg.ForkChoiceStore.RUnlock()
-	return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
-}
-
 // IsOptimisticForRoot takes the root as argument instead of the current head
 // and returns true if it is optimistic.
 func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
@@ -518,13 +509,6 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
 	return ar[:], nil
 }

-// SetOptimisticToInvalid wraps the corresponding method in forkchoice
-func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
-	s.cfg.ForkChoiceStore.Lock()
-	defer s.cfg.ForkChoiceStore.Unlock()
-	return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
-}
-
 // SetGenesisTime sets the genesis time of beacon chain.
 func (s *Service) SetGenesisTime(t time.Time) {
 	s.genesisTime = t
@@ -256,7 +256,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
 // reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
 func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
 	newPayloadInvalidNodeCount.Inc()
-	invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
+	invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
 	if err != nil {
 		return err
 	}
@@ -497,7 +497,10 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
 func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
 	isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
 	if err != nil {
-		return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
+		s.cfg.ForkChoiceStore.Lock()
+		err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
+		s.cfg.ForkChoiceStore.Unlock()
+		return false, err
 	}
 	if signed.Version() < version.Capella && isValidPayload {
 		if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
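The blockchain-service hunks above share one theme: the `SetOptimisticToInvalid` wrapper is removed and forkchoice-store locking now happens explicitly at each call site, in the narrowest critical section that still covers the operation. A hedged sketch of that calling pattern; the `store` type below is invented for illustration and is not Prysm's real forkchoice store:

```go
package main

import (
	"fmt"
	"sync"
)

// store stands in for cfg.ForkChoiceStore: methods assume the caller
// already holds the appropriate lock.
type store struct {
	sync.RWMutex
	nodes map[[32]byte]bool
}

// hasNode assumes the read lock is held by the caller.
func (s *store) hasNode(root [32]byte) bool { return s.nodes[root] }

// InForkchoice shows the caller-side pattern: take the lock, perform one
// short operation, and release before doing anything slow (logging,
// channel sends, error handling).
func InForkchoice(s *store, root [32]byte) bool {
	s.RLock()
	defer s.RUnlock()
	return s.hasNode(root)
}

func main() {
	s := &store{nodes: map[[32]byte]bool{{1}: true}}
	fmt.Println(InForkchoice(s, [32]byte{1})) // true
}
```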
@@ -9,7 +9,7 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
@@ -80,7 +80,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
 	attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
 	require.NoError(t, err)

-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	fc := doublylinkedtree.New()
@@ -8,7 +8,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/async/event"
 	mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
 	statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
@@ -79,7 +79,7 @@ type testServiceRequirements struct {
 	attPool attestations.Pool
 	attSrv  *attestations.Service
 	blsPool *blstoexec.Pool
-	dc      *depositcache.DepositCache
+	dc      *depositsnapshot.Cache
 }

 func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
@@ -94,7 +94,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
 	attSrv, err := attestations.NewService(ctx, &attestations.Config{Pool: attPool})
 	require.NoError(t, err)
 	blsPool := blstoexec.NewPool()
-	dc, err := depositcache.New()
+	dc, err := depositsnapshot.New()
 	require.NoError(t, err)
 	req := &testServiceRequirements{
 		ctx:     ctx,
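The four blockchain hunks above are mechanical: every `depositcache.New()` and `*depositcache.DepositCache` becomes `depositsnapshot.New()` and `*depositsnapshot.Cache`. The swap stays this small because consumers generally hold the cache behind an interface rather than the concrete type. A sketch of that shape, with an invented `DepositInserter` interface standing in for the real interfaces in `beacon-chain/cache`:

```go
package main

import (
	"context"
	"fmt"
)

// DepositInserter stands in for the shared cache interface that both the
// old depositcache.DepositCache and the new depositsnapshot.Cache satisfy.
type DepositInserter interface {
	InsertPendingDeposit(ctx context.Context, blockNum uint64, index int64)
}

// snapshotCache mimics depositsnapshot.Cache for this sketch.
type snapshotCache struct{ pending int }

func (c *snapshotCache) InsertPendingDeposit(_ context.Context, _ uint64, _ int64) {
	c.pending++
}

// setup accepts any implementation, so replacing depositcache with
// depositsnapshot only changes the constructor call at the edges.
func setup(c DepositInserter) DepositInserter { return c }

func main() {
	c := setup(&snapshotCache{})
	c.InsertPendingDeposit(context.Background(), 111, 100)
	fmt.Println("inserted")
}
```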
3 beacon-chain/cache/BUILD.bazel (vendored)
@@ -6,6 +6,7 @@ go_library(
 		"active_balance.go",
 		"active_balance_disabled.go",  # keep
 		"attestation_data.go",
+		"balance_cache_key.go",
 		"checkpoint_state.go",
 		"committee.go",
 		"committee_disabled.go",  # keep
@@ -70,6 +71,7 @@ go_test(
 		"committee_fuzz_test.go",
 		"committee_test.go",
 		"payload_id_test.go",
+		"private_access_test.go",
 		"proposer_indices_test.go",
 		"registration_test.go",
 		"skip_slot_cache_test.go",
@@ -93,6 +95,7 @@ go_test(
 		"//testing/util:go_default_library",
 		"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
 		"@com_github_google_gofuzz//:go_default_library",
+		"@com_github_hashicorp_golang_lru//:go_default_library",
 		"@com_github_stretchr_testify//require:go_default_library",
 		"@org_golang_google_protobuf//proto:go_default_library",
 	],
37 beacon-chain/cache/active_balance.go (vendored)
@@ -3,17 +3,13 @@
 package cache

 import (
-	"encoding/binary"
 	"sync"

 	lru "github.com/hashicorp/golang-lru"
-	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
 	lruwrpr "github.com/prysmaticlabs/prysm/v5/cache/lru"
-	"github.com/prysmaticlabs/prysm/v5/config/params"
-	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 )

 const (
@@ -86,36 +82,3 @@ func (c *BalanceCache) Get(st state.ReadOnlyBeaconState) (uint64, error) {
 	balanceCacheHit.Inc()
 	return value.(uint64), nil
 }
-
-// Given input state `st`, balance key is constructed as:
-// (block_root in `st` at epoch_start_slot - 1) + current_epoch + validator_count
-func balanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
-	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
-	currentEpoch := st.Slot().DivSlot(slotsPerEpoch)
-	epochStartSlot, err := slotsPerEpoch.SafeMul(uint64(currentEpoch))
-	if err != nil {
-		// impossible condition due to early division
-		return "", errors.Errorf("start slot calculation overflows: %v", err)
-	}
-	prevSlot := primitives.Slot(0)
-	if epochStartSlot > 1 {
-		prevSlot = epochStartSlot - 1
-	}
-	r, err := st.BlockRootAtIndex(uint64(prevSlot % params.BeaconConfig().SlotsPerHistoricalRoot))
-	if err != nil {
-		// impossible condition because index is always constrained within state
-		return "", err
-	}
-
-	// Mix in current epoch
-	b := make([]byte, 8)
-	binary.LittleEndian.PutUint64(b, uint64(currentEpoch))
-	key := append(r, b...)
-
-	// Mix in validator count
-	b = make([]byte, 8)
-	binary.LittleEndian.PutUint64(b, uint64(st.NumValidators()))
-	key = append(key, b...)
-
-	return string(key), nil
-}
31 beacon-chain/cache/active_balance_test.go (vendored)
@@ -1,12 +1,13 @@
 //go:build !fuzz

-package cache
+package cache_test

 import (
 	"encoding/binary"
 	"math"
 	"testing"

+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
 	state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -27,33 +28,33 @@ func TestBalanceCache_AddGetBalance(t *testing.T) {
 	st, err := state_native.InitializeFromProtoPhase0(raw)
 	require.NoError(t, err)

-	cache := NewEffectiveBalanceCache()
-	_, err = cache.Get(st)
-	require.ErrorContains(t, ErrNotFound.Error(), err)
+	cc := cache.NewEffectiveBalanceCache()
+	_, err = cc.Get(st)
+	require.ErrorContains(t, cache.ErrNotFound.Error(), err)

 	b := uint64(100)
-	require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
-	cachedB, err := cache.Get(st)
+	require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
+	cachedB, err := cc.Get(st)
 	require.NoError(t, err)
 	require.Equal(t, b, cachedB)

 	require.NoError(t, st.SetSlot(1000))
-	_, err = cache.Get(st)
-	require.ErrorContains(t, ErrNotFound.Error(), err)
+	_, err = cc.Get(st)
+	require.ErrorContains(t, cache.ErrNotFound.Error(), err)

 	b = uint64(200)
-	require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
-	cachedB, err = cache.Get(st)
+	require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
+	cachedB, err = cc.Get(st)
 	require.NoError(t, err)
 	require.Equal(t, b, cachedB)

 	require.NoError(t, st.SetSlot(1000+params.BeaconConfig().SlotsPerHistoricalRoot))
-	_, err = cache.Get(st)
-	require.ErrorContains(t, ErrNotFound.Error(), err)
+	_, err = cc.Get(st)
+	require.ErrorContains(t, cache.ErrNotFound.Error(), err)

 	b = uint64(300)
-	require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
-	cachedB, err = cache.Get(st)
+	require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
+	cachedB, err = cc.Get(st)
 	require.NoError(t, err)
 	require.Equal(t, b, cachedB)
 }
@@ -72,6 +73,6 @@ func TestBalanceCache_BalanceKey(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, st.SetSlot(primitives.Slot(math.MaxUint64)))

-	_, err = balanceCacheKey(st)
+	_, err = cache.BalanceCacheKey(st)
 	require.NoError(t, err)
 }
43 beacon-chain/cache/balance_cache_key.go (vendored, new file)
@@ -0,0 +1,43 @@
package cache

import (
	"encoding/binary"
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// Given input state `st`, balance key is constructed as:
// (block_root in `st` at epoch_start_slot - 1) + current_epoch + validator_count
func balanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	currentEpoch := st.Slot().DivSlot(slotsPerEpoch)
	epochStartSlot, err := slotsPerEpoch.SafeMul(uint64(currentEpoch))
	if err != nil {
		// impossible condition due to early division
		return "", fmt.Errorf("start slot calculation overflows: %w", err)
	}
	prevSlot := primitives.Slot(0)
	if epochStartSlot > 1 {
		prevSlot = epochStartSlot - 1
	}
	r, err := st.BlockRootAtIndex(uint64(prevSlot % params.BeaconConfig().SlotsPerHistoricalRoot))
	if err != nil {
		// impossible condition because index is always constrained within state
		return "", err
	}

	// Mix in current epoch
	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, uint64(currentEpoch))
	key := append(r, b...)

	// Mix in validator count
	b = make([]byte, 8)
	binary.LittleEndian.PutUint64(b, uint64(st.NumValidators()))
	key = append(key, b...)

	return string(key), nil
}
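The key built by balance_cache_key.go is three fixed-width fields concatenated, so it is always 48 bytes: a 32-byte block root, then the epoch and the validator count as 8-byte little-endian integers. A standalone sketch of the same layout:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// makeKey mirrors balanceCacheKey's layout: root ++ epoch ++ validatorCount.
func makeKey(root [32]byte, epoch, validatorCount uint64) string {
	key := make([]byte, 0, 48)
	key = append(key, root[:]...)

	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, epoch)
	key = append(key, b...)

	binary.LittleEndian.PutUint64(b, validatorCount)
	key = append(key, b...)

	return string(key)
}

func main() {
	k := makeKey([32]byte{1}, 10, 500_000)
	fmt.Println(len(k)) // 48
}
```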
11 beacon-chain/cache/checkpoint_state_test.go (vendored)
@@ -1,8 +1,9 @@
-package cache
+package cache_test

 import (
 	"testing"

+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
 	state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -15,7 +16,7 @@ import (
 )

 func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
-	cache := NewCheckpointStateCache()
+	cache := cache.NewCheckpointStateCache()

 	cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
 	st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -58,16 +59,16 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
 }

 func TestCheckpointStateCache_MaxSize(t *testing.T) {
-	c := NewCheckpointStateCache()
+	c := cache.NewCheckpointStateCache()
 	st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
 		Slot: 0,
 	})
 	require.NoError(t, err)

-	for i := uint64(0); i < uint64(maxCheckpointStateSize+100); i++ {
+	for i := uint64(0); i < uint64(cache.MaxCheckpointStateSize()+100); i++ {
 		require.NoError(t, st.SetSlot(primitives.Slot(i)))
 		require.NoError(t, c.AddCheckpointState(&ethpb.Checkpoint{Epoch: primitives.Epoch(i), Root: make([]byte, 32)}, st))
 	}

-	assert.Equal(t, maxCheckpointStateSize, len(c.cache.Keys()))
+	assert.Equal(t, cache.MaxCheckpointStateSize(), len(c.Cache().Keys()))
 }
50 beacon-chain/cache/depositcache/BUILD.bazel (vendored)
@@ -1,50 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "deposits_cache.go",
        "log.go",
        "pending_deposits.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache",
    visibility = [
        "//beacon-chain:__subpackages__",
        "//testing/spectest:__subpackages__",
    ],
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//container/trie:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "deposits_cache_test.go",
        "pending_deposits_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//config/params:go_default_library",
        "//container/trie:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
327 beacon-chain/cache/depositcache/deposits_cache.go (vendored)
@@ -1,327 +0,0 @@
// Package depositcache is the source of validator deposits maintained
// in-memory by the beacon node – deposits processed from the
// eth1 powchain are then stored in this cache to be accessed by
// any other service during a beacon node's runtime.
package depositcache

import (
	"context"
	"encoding/hex"
	"math/big"
	"sort"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/container/trie"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

var (
	historicalDepositsCount = promauto.NewCounter(prometheus.CounterOpts{
		Name: "beacondb_all_deposits",
		Help: "The number of total deposits in the beaconDB in-memory database",
	})
)

// FinalizedDeposits stores the trie of deposits that have been included
// in the beacon state up to the latest finalized checkpoint.
type FinalizedDeposits struct {
	deposits        *trie.SparseMerkleTrie
	merkleTrieIndex int64
}

// DepositCache stores all in-memory deposit objects. This
// stores all the deposit related data that is required by the beacon-node.
type DepositCache struct {
	// Beacon chain deposits in memory.
	pendingDeposits   []*ethpb.DepositContainer
	deposits          []*ethpb.DepositContainer
	finalizedDeposits FinalizedDeposits
	depositsByKey     map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer
	depositsLock      sync.RWMutex
}

// New instantiates a new deposit cache
func New() (*DepositCache, error) {
	finalizedDepositsTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
	if err != nil {
		return nil, err
	}

	// finalizedDeposits.merkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
	// Inserting the first item into the trie will set the value of the index to 0.
	return &DepositCache{
		pendingDeposits:   []*ethpb.DepositContainer{},
		deposits:          []*ethpb.DepositContainer{},
		depositsByKey:     map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer{},
		finalizedDeposits: FinalizedDeposits{deposits: finalizedDepositsTrie, merkleTrieIndex: -1},
	}, nil
}

// InsertDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
	_, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
	defer span.End()
	if d == nil {
		log.WithFields(logrus.Fields{
			"block":       blockNum,
			"deposit":     d,
			"index":       index,
			"depositRoot": hex.EncodeToString(depositRoot[:]),
		}).Warn("Ignoring nil deposit insertion")
		return errors.New("nil deposit inserted into the cache")
	}
	dc.depositsLock.Lock()
	defer dc.depositsLock.Unlock()

	if int(index) != len(dc.deposits) {
		return errors.Errorf("wanted deposit with index %d to be inserted but received %d", len(dc.deposits), index)
	}
	// Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
	heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
	depCtr := &ethpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}
	newDeposits := append(
		[]*ethpb.DepositContainer{depCtr},
		dc.deposits[heightIdx:]...)
	dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
	// Append the deposit to our map, in the event no deposits
	// exist for the pubkey , it is simply added to the map.
	pubkey := bytesutil.ToBytes48(d.Data.PublicKey)
	dc.depositsByKey[pubkey] = append(dc.depositsByKey[pubkey], depCtr)
	historicalDepositsCount.Inc()
	return nil
}

// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
	_, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
	defer span.End()
	dc.depositsLock.Lock()
	defer dc.depositsLock.Unlock()

	sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
	dc.deposits = ctrs
	for _, c := range ctrs {
		// Use a new value, as the reference
		// of c changes in the next iteration.
		newPtr := c
		pKey := bytesutil.ToBytes48(newPtr.Deposit.Data.PublicKey)
		dc.depositsByKey[pKey] = append(dc.depositsByKey[pKey], newPtr)
	}
	historicalDepositsCount.Add(float64(len(ctrs)))
}

// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context,
	eth1DepositIndex int64, _ common.Hash, _ uint64) error {
	_, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
	defer span.End()
	dc.depositsLock.Lock()
	defer dc.depositsLock.Unlock()

	depositTrie := dc.finalizedDeposits.Deposits()
	insertIndex := int(dc.finalizedDeposits.merkleTrieIndex + 1)

	// Don't insert into finalized trie if there is no deposit to
	// insert.
	if len(dc.deposits) == 0 {
		return nil
	}
	// In the event we have less deposits than we need to
	// finalize we finalize till the index on which we do have it.
	if len(dc.deposits) <= int(eth1DepositIndex) {
		eth1DepositIndex = int64(len(dc.deposits)) - 1
	}
	// If we finalize to some lower deposit index, we
	// ignore it.
	if int(eth1DepositIndex) < insertIndex {
		return nil
	}
	for _, d := range dc.deposits {
		if d.Index <= dc.finalizedDeposits.merkleTrieIndex {
			continue
		}
		if d.Index > eth1DepositIndex {
			break
		}
		depHash, err := d.Deposit.Data.HashTreeRoot()
		if err != nil {
			return errors.Wrap(err, "could not hash deposit data")
		}
		if err = depositTrie.Insert(depHash[:], insertIndex); err != nil {
			return errors.Wrap(err, "could not insert deposit hash")
		}
		insertIndex++
	}
	tree, ok := depositTrie.(*trie.SparseMerkleTrie)
	if !ok {
		return errors.New("not a sparse merkle tree")
	}
	dc.finalizedDeposits = FinalizedDeposits{
		deposits:        tree,
		merkleTrieIndex: eth1DepositIndex,
	}
	return nil
}

// AllDepositContainers returns all historical deposit containers.
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
	_, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
	defer span.End()
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()

	// Make a shallow copy of the deposits and return that. This way, the
	// caller can safely iterate over the returned list of deposits without
	// the possibility of new deposits showing up. If we were to return the
	// list without a copy, when a new deposit is added to the cache, it
	// would also be present in the returned value. This could result in a
	// race condition if the list is being iterated over.
	//
	// It's not necessary to make a deep copy of this list because the
	// deposits in the cache should never be modified. It is still possible
	// for the caller to modify one of the underlying deposits and modify
	// the cache, but that's not a race condition. Also, a deep copy would
	// take too long and use too much memory.
	deposits := make([]*ethpb.DepositContainer, len(dc.deposits))
	copy(deposits, dc.deposits)
	return deposits
}

// AllDeposits returns a list of historical deposits until the given block number
// (inclusive). If no block is specified then this method returns all historical deposits.
func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()

	return dc.allDeposits(untilBlk)
}

func (dc *DepositCache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
	var deposits []*ethpb.Deposit
	for _, ctnr := range dc.deposits {
		if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
			deposits = append(deposits, ctnr.Deposit)
		}
	}
	return deposits
}

// DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
// root that corresponds to the latest deposit at that blockheight.
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
	_, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
	defer span.End()
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()
	heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Eth1BlockHeight > blockHeight.Uint64() })
	// send the deposit root of the empty trie, if eth1follow distance is greater than the time of the earliest
	// deposit.
	if heightIdx == 0 {
		return 0, [32]byte{}
	}
	return uint64(heightIdx), bytesutil.ToBytes32(dc.deposits[heightIdx-1].DepositRoot)
}

// DepositByPubkey looks through historical deposits and finds one which contains
// a certain public key within its deposit data.
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
	_, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
	defer span.End()
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()

	var deposit *ethpb.Deposit
	var blockNum *big.Int
	deps, ok := dc.depositsByKey[bytesutil.ToBytes48(pubKey)]
	if !ok || len(deps) == 0 {
		return deposit, blockNum
	}
	// We always return the first deposit if a particular
	// validator key has multiple deposits assigned to
	// it.
	deposit = deps[0].Deposit
	blockNum = big.NewInt(int64(deps[0].Eth1BlockHeight))
	return deposit, blockNum
}

// FinalizedDeposits returns the finalized deposits trie.
func (dc *DepositCache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits, error) {
	_, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
	defer span.End()
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()

	return &FinalizedDeposits{
		deposits:        dc.finalizedDeposits.deposits.Copy(),
		merkleTrieIndex: dc.finalizedDeposits.merkleTrieIndex,
	}, nil
}

// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
	_, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
	defer span.End()
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()

	if dc.finalizedDeposits.Deposits() == nil {
		return dc.allDeposits(untilBlk)
	}

	var deposits []*ethpb.Deposit
	for _, d := range dc.deposits {
		if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
			deposits = append(deposits, d.Deposit)
		}
	}

	return deposits
}

// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
	_, span := trace.StartSpan(ctx, "DepositsCache.PruneProofs")
	defer span.End()
	dc.depositsLock.Lock()
	defer dc.depositsLock.Unlock()

	if untilDepositIndex >= int64(len(dc.deposits)) {
		untilDepositIndex = int64(len(dc.deposits) - 1)
	}

	for i := untilDepositIndex; i >= 0; i-- {
		// Finding a nil proof means that all proofs up to this deposit have been already pruned.
		if dc.deposits[i].Deposit.Proof == nil {
			break
		}
		dc.deposits[i].Deposit.Proof = nil
	}

	return nil
}

// Deposits returns the cached internal deposit tree.
func (fd *FinalizedDeposits) Deposits() cache.MerkleTree {
	if fd.deposits != nil {
		return fd.deposits
	}
	return nil
}

// MerkleTrieIndex represents the last finalized index in
// the finalized deposit container.
func (fd *FinalizedDeposits) MerkleTrieIndex() int64 {
	return fd.merkleTrieIndex
}
1074 beacon-chain/cache/depositcache/deposits_cache_test.go (vendored)
File diff suppressed because it is too large.
5 beacon-chain/cache/depositcache/log.go (vendored)
@@ -1,5 +0,0 @@
package depositcache

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "depositcache")
151 beacon-chain/cache/depositcache/pending_deposits.go (vendored)
@@ -1,151 +0,0 @@
package depositcache

import (
	"context"
	"math/big"
	"sort"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prysmaticlabs/prysm/v5/crypto/hash"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

var (
	pendingDepositsCount = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "beacondb_pending_deposits",
		Help: "The number of pending deposits in the beaconDB in-memory database",
	})
)

// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
// which have not yet been included in the chain.
type PendingDepositsFetcher interface {
	PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
}

// InsertPendingDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
	_, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
	defer span.End()
	if d == nil {
		log.WithFields(logrus.Fields{
			"block":   blockNum,
			"deposit": d,
		}).Debug("Ignoring nil deposit insertion")
		return
	}
	dc.depositsLock.Lock()
	defer dc.depositsLock.Unlock()
	dc.pendingDeposits = append(dc.pendingDeposits,
		&ethpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
	pendingDepositsCount.Inc()
	span.AddAttributes(trace.Int64Attribute("count", int64(len(dc.pendingDeposits))))
}

// PendingDeposits returns a list of deposits until the given block number
// (inclusive). If no block is specified then this method returns all pending
// deposits.
func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
	ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
	defer span.End()

	depositCntrs := dc.PendingContainers(ctx, untilBlk)

	deposits := make([]*ethpb.Deposit, 0, len(depositCntrs))
	for _, dep := range depositCntrs {
		deposits = append(deposits, dep.Deposit)
	}

	return deposits
}

// PendingContainers returns a list of deposit containers until the given block number
// (inclusive).
func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
	_, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
	defer span.End()
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()

	depositCntrs := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
	for _, ctnr := range dc.pendingDeposits {
		if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
			depositCntrs = append(depositCntrs, ctnr)
		}
	}
	// Sort the deposits by Merkle index.
	sort.SliceStable(depositCntrs, func(i, j int) bool {
		return depositCntrs[i].Index < depositCntrs[j].Index
	})

	span.AddAttributes(trace.Int64Attribute("count", int64(len(depositCntrs))))

	return depositCntrs
}

// RemovePendingDeposit from the database. The deposit is indexed by the
// Index. This method does nothing if deposit ptr is nil.
func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Deposit) {
	_, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
	defer span.End()

	if d == nil {
		log.Debug("Ignoring nil deposit removal")
		return
	}

	depRoot, err := hash.Proto(d)
	if err != nil {
		log.WithError(err).Error("Could not remove deposit")
		return
	}

	dc.depositsLock.Lock()
	defer dc.depositsLock.Unlock()

	idx := -1
	for i, ctnr := range dc.pendingDeposits {
		h, err := hash.Proto(ctnr.Deposit)
		if err != nil {
			log.WithError(err).Error("Could not hash deposit")
			continue
		}
		if h == depRoot {
			idx = i
			break
		}
	}

	if idx >= 0 {
		dc.pendingDeposits = append(dc.pendingDeposits[:idx], dc.pendingDeposits[idx+1:]...)
		pendingDepositsCount.Dec()
	}
}

// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
	_, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
	defer span.End()

	if merkleTreeIndex == 0 {
		log.Debug("Ignoring 0 deposit removal")
		return
	}

	dc.depositsLock.Lock()
	defer dc.depositsLock.Unlock()

	cleanDeposits := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
	for _, dp := range dc.pendingDeposits {
		if dp.Index >= merkleTreeIndex {
			cleanDeposits = append(cleanDeposits, dp)
		}
	}

	dc.pendingDeposits = cleanDeposits
	pendingDepositsCount.Set(float64(len(dc.pendingDeposits)))
}
@@ -34,6 +34,7 @@ go_test(
 	name = "go_default_test",
 	srcs = [
+		"deposit_cache_test.go",
 		"deposit_fetcher_test.go",
 		"deposit_tree_snapshot_test.go",
 		"merkle_tree_test.go",
 		"spec_test.go",
@@ -262,6 +262,12 @@ func toFinalizedDepositsContainer(deposits *DepositTree, index int64) finalizedD
 	}
 }

+// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
+// which have not yet been included in the chain.
+type PendingDepositsFetcher interface {
+	PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
+}
+
 // PendingDeposits returns a list of deposits until the given block number
 // (inclusive). If no block is specified then this method returns all pending
 // deposits.
@@ -1,82 +1,32 @@
-package depositcache
+package depositsnapshot

 import (
 	"context"
 	"math/big"
 	"testing"

 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/testing/assert"
 	"google.golang.org/protobuf/proto"
 )

-var _ PendingDepositsFetcher = (*DepositCache)(nil)
+var _ PendingDepositsFetcher = (*Cache)(nil)

 func TestInsertPendingDeposit_OK(t *testing.T) {
-	dc := DepositCache{}
+	dc := Cache{}
 	dc.InsertPendingDeposit(context.Background(), &ethpb.Deposit{}, 111, 100, [32]byte{})

 	assert.Equal(t, 1, len(dc.pendingDeposits), "deposit not inserted")
 }

 func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
-	dc := DepositCache{}
+	dc := Cache{}
 	dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})

 	assert.Equal(t, 0, len(dc.pendingDeposits))
 }

-func TestRemovePendingDeposit_OK(t *testing.T) {
-	db := DepositCache{}
-	proof1 := makeDepositProof()
-	proof1[0] = bytesutil.PadTo([]byte{'A'}, 32)
-	proof2 := makeDepositProof()
-	proof2[0] = bytesutil.PadTo([]byte{'A'}, 32)
-	data := &ethpb.Deposit_Data{
-		PublicKey:             make([]byte, 48),
-		WithdrawalCredentials: make([]byte, 32),
-		Amount:                0,
-		Signature:             make([]byte, 96),
-	}
-	depToRemove := &ethpb.Deposit{Proof: proof1, Data: data}
-	otherDep := &ethpb.Deposit{Proof: proof2, Data: data}
-	db.pendingDeposits = []*ethpb.DepositContainer{
-		{Deposit: depToRemove, Index: 1},
-		{Deposit: otherDep, Index: 5},
-	}
-	db.RemovePendingDeposit(context.Background(), depToRemove)
-
-	if len(db.pendingDeposits) != 1 || !proto.Equal(db.pendingDeposits[0].Deposit, otherDep) {
-		t.Error("Failed to remove deposit")
-	}
-}
-
-func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
-	dc := DepositCache{}
-	dc.pendingDeposits = []*ethpb.DepositContainer{{Deposit: &ethpb.Deposit{}}}
-	dc.RemovePendingDeposit(context.Background(), nil /*deposit*/)
-	assert.Equal(t, 1, len(dc.pendingDeposits), "deposit unexpectedly removed")
-}
-
-func TestPendingDeposit_RoundTrip(t *testing.T) {
-	dc := DepositCache{}
-	proof := makeDepositProof()
-	proof[0] = bytesutil.PadTo([]byte{'A'}, 32)
-	data := &ethpb.Deposit_Data{
-		PublicKey:             make([]byte, 48),
-		WithdrawalCredentials: make([]byte, 32),
-		Amount:                0,
-		Signature:             make([]byte, 96),
-	}
-	dep := &ethpb.Deposit{Proof: proof, Data: data}
-	dc.InsertPendingDeposit(context.Background(), dep, 111, 100, [32]byte{})
-	dc.RemovePendingDeposit(context.Background(), dep)
-	assert.Equal(t, 0, len(dc.pendingDeposits), "Failed to insert & delete a pending deposit")
-}
-
 func TestPendingDeposits_OK(t *testing.T) {
-	dc := DepositCache{}
+	dc := Cache{}

 	dc.pendingDeposits = []*ethpb.DepositContainer{
 		{Eth1BlockHeight: 2, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}},
@@ -96,7 +46,7 @@ func TestPendingDeposits_OK(t *testing.T) {
 }

 func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
-	dc := DepositCache{}
+	dc := Cache{}

 	dc.pendingDeposits = []*ethpb.DepositContainer{
 		{Eth1BlockHeight: 2, Index: 2},
@@ -120,7 +70,7 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
 }

 func TestPrunePendingDeposits_OK(t *testing.T) {
-	dc := DepositCache{}
+	dc := Cache{}

 	dc.pendingDeposits = []*ethpb.DepositContainer{
 		{Eth1BlockHeight: 2, Index: 2},
@@ -99,11 +99,23 @@ func (d *DepositTree) getProof(index uint64) ([32]byte, [][32]byte, error) {
 	if d.depositCount <= 0 {
 		return [32]byte{}, nil, ErrInvalidDepositCount
 	}
-	finalizedDeposits, _ := d.tree.GetFinalized([][32]byte{})
-	if finalizedDeposits != 0 {
-		finalizedDeposits = finalizedDeposits - 1
-	}
 	if index >= d.depositCount {
 		return [32]byte{}, nil, ErrInvalidIndex
 	}
-	if index <= finalizedDeposits {
+	finalizedDeposits, _ := d.tree.GetFinalized([][32]byte{})
+	finalizedIdx := -1
+	if finalizedDeposits != 0 {
+		fd, err := math.Int(finalizedDeposits)
+		if err != nil {
+			return [32]byte{}, nil, err
+		}
+		finalizedIdx = fd - 1
+	}
+	i, err := math.Int(index)
+	if err != nil {
+		return [32]byte{}, nil, err
+	}
+	if finalizedDeposits > 0 && i <= finalizedIdx {
 		return [32]byte{}, nil, ErrInvalidIndex
 	}
 	leaf, proof := generateProof(d.tree, index, DepositContractDepth)
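The getProof change routes every uint64-to-int conversion through Prysm's `math.Int`, which fails instead of silently truncating on platforms where int is narrower than the value. A hedged sketch of what such a checked conversion looks like; this is an illustration, not Prysm's exact implementation:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

// intFromUint64 converts a uint64 to int, erroring if the value cannot be
// represented (e.g. on 32-bit platforms, or values above MaxInt64).
func intFromUint64(v uint64) (int, error) {
	if v > uint64(math.MaxInt) {
		return 0, errors.New("uint64 value overflows int")
	}
	return int(v), nil
}

func main() {
	i, err := intFromUint64(41)
	fmt.Println(i, err) // 41 <nil>

	_, err = intFromUint64(math.MaxUint64)
	fmt.Println(err) // uint64 value overflows int
}
```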
18 beacon-chain/cache/private_access_test.go (vendored, new file)
@@ -0,0 +1,18 @@
package cache

import (
	lru "github.com/hashicorp/golang-lru"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
)

func BalanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
	return balanceCacheKey(st)
}

func MaxCheckpointStateSize() int {
	return maxCheckpointStateSize
}

func (c *CheckpointStateCache) Cache() *lru.Cache {
	return c.cache
}
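private_access_test.go is the standard export-for-test trick: it lives in package `cache` (not `cache_test`), is compiled only under `go test`, and re-exports unexported internals so the black-box tests above can reach them without widening the public API. A generic sketch of the pattern with hypothetical names; in a real package the exports would sit in their own `_test.go` file:

```go
// Package queue sketches the export-for-test pattern used by
// private_access_test.go above. In a real package the two halves live in
// separate files: the internals in queue.go, and the test-only exports in
// export_test.go (compiled by the go tool only during `go test`).
package queue

const capacity = 64 // unexported implementation detail

func push(q *[]int, v int) bool {
	if len(*q) >= capacity {
		return false
	}
	*q = append(*q, v)
	return true
}

// --- below here would be export_test.go ---

// Capacity re-exports the unexported constant for package queue_test.
func Capacity() int { return capacity }

// Push re-exports the unexported helper for package queue_test.
func Push(q *[]int, v int) bool { return push(q, v) }
```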
@@ -1,8 +1,9 @@
-package cache
+package cache_test

 import (
 	"testing"

+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
 	state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -125,7 +126,7 @@ func TestSyncCommitteeHeadState(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			c := NewSyncCommitteeHeadState()
+			c := cache.NewSyncCommitteeHeadState()
 			if tt.put != nil {
 				err := c.Put(tt.put.slot, tt.put.state)
 				if (err != nil) != tt.wantPutErr {
@@ -50,6 +50,8 @@ go_test(
 		"attestation_test.go",
 		"beacon_committee_test.go",
 		"block_test.go",
+		"private_access_fuzz_noop_test.go",  # keep
+		"private_access_test.go",
 		"randao_test.go",
 		"rewards_penalties_test.go",
 		"shuffle_test.go",
@@ -140,108 +140,140 @@ func BeaconCommittee(
|
||||
}
|
||||
count := committeesPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
|
||||
return computeCommittee(validatorIndices, seed, indexOffset, count)
|
||||
return ComputeCommittee(validatorIndices, seed, indexOffset, count)
|
||||
}
|
||||
|
||||
// CommitteeAssignmentContainer represents a committee list, committee index, and to be attested slot for a given epoch.
|
||||
type CommitteeAssignmentContainer struct {
|
||||
// CommitteeAssignment represents committee list, committee index, and to be attested slot for a given epoch.
|
||||
type CommitteeAssignment struct {
|
||||
Committee []primitives.ValidatorIndex
|
||||
AttesterSlot primitives.Slot
|
||||
CommitteeIndex primitives.CommitteeIndex
|
||||
}
|
||||
|
||||
// CommitteeAssignments is a map of validator indices pointing to the appropriate committee
|
||||
// assignment for the given epoch.
|
||||
//
|
||||
// 1. Determine the proposer validator index for each slot.
|
||||
// 2. Compute all committees.
|
||||
// 3. Determine the attesting slot for each committee.
|
||||
// 4. Construct a map of validator indices pointing to the respective committees.
|
||||
func CommitteeAssignments(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
epoch primitives.Epoch,
|
||||
) (map[primitives.ValidatorIndex]*CommitteeAssignmentContainer, map[primitives.ValidatorIndex][]primitives.Slot, error) {
|
||||
// verifyAssignmentEpoch verifies if the given epoch is valid for assignment based on the provided state.
|
||||
// It checks if the epoch is not greater than the next epoch, and if the start slot of the epoch is greater
|
||||
// than or equal to the minimum valid start slot calculated based on the state's current slot and historical roots.
|
||||
func verifyAssignmentEpoch(epoch primitives.Epoch, state state.BeaconState) error {
|
||||
nextEpoch := time.NextEpoch(state)
|
||||
if epoch > nextEpoch {
|
||||
return nil, nil, fmt.Errorf(
|
||||
"epoch %d can't be greater than next epoch %d",
|
||||
epoch,
|
||||
nextEpoch,
|
||||
)
|
||||
return fmt.Errorf("epoch %d can't be greater than next epoch %d", epoch, nextEpoch)
|
||||
}
|
||||
|
||||
// We determine the slots in which proposers are supposed to act.
|
||||
// Some validators may need to propose multiple times per epoch, so
|
||||
// we use a map of proposer idx -> []slot to keep track of this possibility.
|
||||
startSlot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
minValidStartSlot := primitives.Slot(0)
|
||||
if state.Slot() >= params.BeaconConfig().SlotsPerHistoricalRoot {
|
||||
minValidStartSlot = state.Slot() - params.BeaconConfig().SlotsPerHistoricalRoot
|
||||
if stateSlot := state.Slot(); stateSlot >= params.BeaconConfig().SlotsPerHistoricalRoot {
|
||||
minValidStartSlot = stateSlot - params.BeaconConfig().SlotsPerHistoricalRoot
|
||||
}
|
||||
if startSlot < minValidStartSlot {
|
||||
return nil, nil, fmt.Errorf("start slot %d is smaller than the minimum valid start slot %d", startSlot, minValidStartSlot)
|
||||
return fmt.Errorf("start slot %d is smaller than the minimum valid start slot %d", startSlot, minValidStartSlot)
|
||||
}
|
||||
return nil
|
||||
}

// ProposerAssignments calculates proposer assignments for each validator during the specified epoch.
// It verifies the validity of the epoch, then iterates through each slot in the epoch to determine the
// proposer for that slot and assigns them accordingly.
func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch primitives.Epoch) (map[primitives.ValidatorIndex][]primitives.Slot, error) {
    // Verify if the epoch is valid for assignment based on the provided state.
    if err := verifyAssignmentEpoch(epoch, state); err != nil {
        return nil, err
    }
    startSlot, err := slots.EpochStart(epoch)
    if err != nil {
        return nil, err
    }

    proposerIndexToSlots := make(map[primitives.ValidatorIndex][]primitives.Slot, params.BeaconConfig().SlotsPerEpoch)
    proposerAssignments := make(map[primitives.ValidatorIndex][]primitives.Slot)

    originalStateSlot := state.Slot()

    for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
        // Skip proposer assignment for genesis slot.
        if slot == 0 {
            continue
        }
        // Set the state's current slot.
        if err := state.SetSlot(slot); err != nil {
            return nil, nil, err
            return nil, err
        }

        // Determine the proposer index for the current slot.
        i, err := BeaconProposerIndex(ctx, state)
        if err != nil {
            return nil, nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
            return nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
        }
        proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot)

        // Append the slot to the proposer's assignments.
        if _, ok := proposerAssignments[i]; !ok {
            proposerAssignments[i] = make([]primitives.Slot, 0)
        }
        proposerAssignments[i] = append(proposerAssignments[i], slot)
    }

    // If previous proposer indices computation is outside if current proposal epoch range,
    // we need to reset state slot back to start slot so that we can compute the correct committees.
    currentProposalEpoch := epoch < nextEpoch
    if !currentProposalEpoch {
        if err := state.SetSlot(state.Slot() - params.BeaconConfig().SlotsPerEpoch); err != nil {
            return nil, nil, err
        }
    // Reset state back to its original slot.
    if err := state.SetSlot(originalStateSlot); err != nil {
        return nil, err
    }

    activeValidatorIndices, err := ActiveValidatorIndices(ctx, state, epoch)
    return proposerAssignments, nil
}
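For callers migrating off the combined helper, the new proposer-side API is used roughly like this (a sketch, not code from this change; it assumes ctx, a populated st, and the same imports the tests below use — context, fmt, helpers, state, primitives):

// printProposerDuties lists every proposal duty in the given epoch.
func printProposerDuties(ctx context.Context, st state.BeaconState, epoch primitives.Epoch) error {
    duties, err := helpers.ProposerAssignments(ctx, st, epoch)
    if err != nil {
        return err
    }
    for validatorIndex, proposalSlots := range duties {
        fmt.Printf("validator %d proposes at slots %v\n", validatorIndex, proposalSlots)
    }
    return nil
}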

// CommitteeAssignments calculates committee assignments for each validator during the specified epoch.
// It retrieves active validator indices, determines the number of committees per slot, and computes
// assignments for each validator based on their presence in the provided validators slice.
func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch primitives.Epoch, validators []primitives.ValidatorIndex) (map[primitives.ValidatorIndex]*CommitteeAssignment, error) {
    // Verify if the epoch is valid for assignment based on the provided state.
    if err := verifyAssignmentEpoch(epoch, state); err != nil {
        return nil, err
    }

    // Retrieve active validator count for the specified epoch.
    activeValidatorCount, err := ActiveValidatorCount(ctx, state, epoch)
    if err != nil {
        return nil, nil, err
        return nil, err
    }
    // Each slot in an epoch has a different set of committees. This value is derived from the
    // active validator set, which does not change.
    numCommitteesPerSlot := SlotCommitteeCount(uint64(len(activeValidatorIndices)))
    validatorIndexToCommittee := make(map[primitives.ValidatorIndex]*CommitteeAssignmentContainer, len(activeValidatorIndices))

    // Compute all committees for all slots.
    for i := primitives.Slot(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
        // Compute committees.
    // Determine the number of committees per slot based on the number of active validator indices.
    numCommitteesPerSlot := SlotCommitteeCount(activeValidatorCount)

    startSlot, err := slots.EpochStart(epoch)
    if err != nil {
        return nil, err
    }

    assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
    vals := make(map[primitives.ValidatorIndex]struct{})
    for _, v := range validators {
        vals[v] = struct{}{}
    }

    // Compute committee assignments for each slot in the epoch.
    for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
        // Compute committees for the current slot.
        for j := uint64(0); j < numCommitteesPerSlot; j++ {
            slot := startSlot + i
            committee, err := BeaconCommitteeFromState(ctx, state, slot, primitives.CommitteeIndex(j) /*committee index*/)
            committee, err := BeaconCommitteeFromState(ctx, state, slot, primitives.CommitteeIndex(j))
            if err != nil {
                return nil, nil, err
                return nil, err
            }

            cac := &CommitteeAssignmentContainer{
                Committee:      committee,
                CommitteeIndex: primitives.CommitteeIndex(j),
                AttesterSlot:   slot,
            }
            for _, vIndex := range committee {
                validatorIndexToCommittee[vIndex] = cac
                if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
                    continue
                }
                if _, ok := assignments[vIndex]; !ok {
                    assignments[vIndex] = &CommitteeAssignment{}
                }
                assignments[vIndex].Committee = committee
                assignments[vIndex].AttesterSlot = slot
                assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
            }
        }
    }

    return validatorIndexToCommittee, proposerIndexToSlots, nil
    return assignments, nil
}
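The attester side is symmetric, with one behavioral detail worth noting: only validators listed in the final argument are recorded in the result, so passing nil yields an empty map — the updated tests below rely on this when they only assert on the returned error. A usage sketch under the same assumptions as the previous one:

// printAttesterDuties fetches duties for two tracked validators (indices
// chosen arbitrarily for illustration).
func printAttesterDuties(ctx context.Context, st state.BeaconState, epoch primitives.Epoch) error {
    tracked := []primitives.ValidatorIndex{12, 97}
    assignments, err := helpers.CommitteeAssignments(ctx, st, epoch, tracked)
    if err != nil {
        return err
    }
    for idx, a := range assignments {
        fmt.Printf("validator %d attests at slot %d in committee %d (committee size %d)\n",
            idx, a.AttesterSlot, a.CommitteeIndex, len(a.Committee))
    }
    return nil
}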

// VerifyBitfieldLength verifies that a bitfield length matches the given committee size.

@@ -359,7 +391,7 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
    if err != nil {
        return err
    }
    proposerIndices, err := precomputeProposerIndices(state, indices, epoch)
    proposerIndices, err := PrecomputeProposerIndices(state, indices, epoch)
    if err != nil {
        return err
    }

@@ -409,7 +441,7 @@ func ClearCache() {
    balanceCache.Clear()
}

// computeCommittee returns the requested shuffled committee out of the total committees using
// ComputeCommittee returns the requested shuffled committee out of the total committees using
// validator indices and seed.
//
// Spec pseudocode definition:

@@ -424,7 +456,7 @@ func ClearCache() {
// start = (len(indices) * index) // count
// end = (len(indices) * uint64(index + 1)) // count
// return [indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end)]
func computeCommittee(
func ComputeCommittee(
    indices []primitives.ValidatorIndex,
    seed [32]byte,
    index, count uint64,

@@ -451,9 +483,9 @@ func computeCommittee(
    return shuffledList[start:end], nil
}
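The spec pseudocode above carves the shuffled index list into contiguous chunks by integer division — the same arithmetic slice.SplitOffset performs in the tests below. A self-contained sketch of just that start/end computation (the shuffle itself is omitted):

package main

import "fmt"

// splitOffset returns the start index of chunk `index` when a list of
// length `size` is split into `count` contiguous chunks, i.e. the spec's
// start = (size * index) // count.
func splitOffset(size, count, index uint64) uint64 {
    return size * index / count
}

func main() {
    size, count := uint64(65), uint64(10) // 65 validators, 10 committees
    for index := uint64(0); index < count; index++ {
        start := splitOffset(size, count, index)
        end := splitOffset(size, count, index+1)
        fmt.Printf("committee %d -> indices [%d, %d)\n", index, start, end)
    }
}

Chunk sizes under this scheme differ by at most one, which is why the tests can derive expected committee slices purely from validatorCount and committeeCount.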

// This computes proposer indices of the current epoch and returns a list of proposer indices,
// PrecomputeProposerIndices computes proposer indices of the current epoch and returns a list of proposer indices,
// the index of the list represents the slot number.
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
func PrecomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
    hashFunc := hash.CustomSHA256Hasher()
    proposerIndices := make([]primitives.ValidatorIndex, params.BeaconConfig().SlotsPerEpoch)

@@ -1,4 +1,4 @@
package helpers
package helpers_test

import (
    "context"

@@ -7,6 +7,7 @@ import (
    "testing"

    "github.com/prysmaticlabs/go-bitfield"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
    state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
    "github.com/prysmaticlabs/prysm/v5/config/params"

@@ -21,7 +22,7 @@ import (
)

func TestComputeCommittee_WithoutCache(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    // Create 10 committees
    committeeCount := uint64(10)

@@ -48,16 +49,16 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
    require.NoError(t, err)

    epoch := time.CurrentEpoch(state)
    indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
    indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
    require.NoError(t, err)
    seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    require.NoError(t, err)
    committees, err := computeCommittee(indices, seed, 0, 1 /* Total committee*/)
    committees, err := helpers.ComputeCommittee(indices, seed, 0, 1 /* Total committee*/)
    assert.NoError(t, err, "Could not compute committee")

    // Test shuffled indices are correct for index 5 committee
    index := uint64(5)
    committee5, err := computeCommittee(indices, seed, index, committeeCount)
    committee5, err := helpers.ComputeCommittee(indices, seed, index, committeeCount)
    assert.NoError(t, err, "Could not compute committee")
    start := slice.SplitOffset(validatorCount, committeeCount, index)
    end := slice.SplitOffset(validatorCount, committeeCount, index+1)

@@ -65,7 +66,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {

    // Test shuffled indices are correct for index 9 committee
    index = uint64(9)
    committee9, err := computeCommittee(indices, seed, index, committeeCount)
    committee9, err := helpers.ComputeCommittee(indices, seed, index, committeeCount)
    assert.NoError(t, err, "Could not compute committee")
    start = slice.SplitOffset(validatorCount, committeeCount, index)
    end = slice.SplitOffset(validatorCount, committeeCount, index+1)

@@ -73,42 +74,45 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
}

func TestComputeCommittee_RegressionTest(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    indices := []primitives.ValidatorIndex{1, 3, 8, 16, 18, 19, 20, 23, 30, 35, 43, 46, 47, 54, 56, 58, 69, 70, 71, 83, 84, 85, 91, 96, 100, 103, 105, 106, 112, 121, 127, 128, 129, 140, 142, 144, 146, 147, 149, 152, 153, 154, 157, 160, 173, 175, 180, 182, 188, 189, 191, 194, 201, 204, 217, 221, 226, 228, 230, 231, 239, 241, 249, 250, 255}
    seed := [32]byte{68, 110, 161, 250, 98, 230, 161, 172, 227, 226, 99, 11, 138, 124, 201, 134, 38, 197, 0, 120, 6, 165, 122, 34, 19, 216, 43, 226, 210, 114, 165, 183}
    index := uint64(215)
    count := uint64(32)
    _, err := computeCommittee(indices, seed, index, count)
    _, err := helpers.ComputeCommittee(indices, seed, index, count)
    require.ErrorContains(t, "index out of range", err)
}

func TestVerifyBitfieldLength_OK(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    bf := bitfield.Bitlist{0xFF, 0x01}
    committeeSize := uint64(8)
    assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
    assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")

    bf = bitfield.Bitlist{0xFF, 0x07}
    committeeSize = 10
    assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
    assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
}

func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    epoch := primitives.Epoch(1)
    state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
        Slot: 0, // Epoch 0.
    })
    require.NoError(t, err)
    _, _, err = CommitteeAssignments(context.Background(), state, epoch+1)
    _, err = helpers.CommitteeAssignments(context.Background(), state, epoch+1, nil)
    assert.ErrorContains(t, "can't be greater than next epoch", err)

    _, err = helpers.ProposerAssignments(context.Background(), state, epoch+1)
    assert.ErrorContains(t, "can't be greater than next epoch", err)
}

func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
    for i := 0; i < len(validators); i++ {

@@ -127,10 +131,10 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
        RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
    })
    require.NoError(t, err)
    _, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, 0)
    require.NoError(t, err, "Failed to determine CommitteeAssignments")
    for _, ss := range proposerIndexToSlots {
        for _, s := range ss {
    assignments, err := helpers.ProposerAssignments(context.Background(), state, 0)
    require.NoError(t, err, "Failed to determine Assignments")
    for _, slots := range assignments {
        for _, s := range slots {
            assert.NotEqual(t, uint64(0), s, "No proposer should be assigned to slot 0")
        }
    }

@@ -139,6 +143,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
    // Initialize test with 256 validators, each slot and each index gets 4 validators.
    validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
    validatorIndices := make([]primitives.ValidatorIndex, len(validators))
    for i := 0; i < len(validators); i++ {
        // First 2 epochs only half validators are activated.
        var activationEpoch primitives.Epoch

@@ -149,6 +154,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
            ActivationEpoch: activationEpoch,
            ExitEpoch:       params.BeaconConfig().FarFutureEpoch,
        }
        validatorIndices[i] = primitives.ValidatorIndex(i)
    }

    state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{

@@ -198,16 +204,18 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {

    for i, tt := range tests {
        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
            ClearCache()
            helpers.ClearCache()

            validatorIndexToCommittee, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
            require.NoError(t, err, "Failed to determine CommitteeAssignments")
            cac := validatorIndexToCommittee[tt.index]
            assignments, err := helpers.CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot), validatorIndices)
            require.NoError(t, err, "Failed to determine Assignments")
            cac := assignments[tt.index]
            assert.Equal(t, tt.committeeIndex, cac.CommitteeIndex, "Unexpected committeeIndex for validator index %d", tt.index)
            assert.Equal(t, tt.slot, cac.AttesterSlot, "Unexpected slot for validator index %d", tt.index)
            if len(proposerIndexToSlots[tt.index]) > 0 && proposerIndexToSlots[tt.index][0] != tt.proposerSlot {
            proposerAssignments, err := helpers.ProposerAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
            require.NoError(t, err)
            if len(proposerAssignments[tt.index]) > 0 && proposerAssignments[tt.index][0] != tt.proposerSlot {
                t.Errorf("wanted proposer slot %d, got proposer slot %d for validator index %d",
                    tt.proposerSlot, proposerIndexToSlots[tt.index][0], tt.index)
                    tt.proposerSlot, proposerAssignments[tt.index][0], tt.index)
            }
            assert.DeepEqual(t, tt.committee, cac.Committee, "Unexpected committee for validator index %d", tt.index)
        })

@@ -215,7 +223,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
}

func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    // Initialize test with 256 validators, each slot and each index gets 4 validators.
    validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)

@@ -237,17 +245,17 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
        RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
    })
    require.NoError(t, err)
    _, proposerIndxs, err := CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state))
    assignments, err := helpers.ProposerAssignments(context.Background(), state, time.CurrentEpoch(state))
    require.NoError(t, err)
    require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
    require.NotEqual(t, 0, len(assignments), "wanted non-zero proposer index set")

    _, proposerIndxs, err = CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
    assignments, err = helpers.ProposerAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
    require.NoError(t, err)
    require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
    require.NotEqual(t, 0, len(assignments), "wanted non-zero proposer index set")
}

func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    // Initialize test with 256 validators, each slot and each index gets 4 validators.
    validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)

@@ -263,12 +271,12 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
        RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
    })
    require.NoError(t, err)
    _, _, err = CommitteeAssignments(context.Background(), state, 0)
    _, err = helpers.CommitteeAssignments(context.Background(), state, 0, nil)
    require.ErrorContains(t, "start slot 0 is smaller than the minimum valid start slot 1", err)
}

func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    // Initialize test with 256 validators, each slot and each index gets 4 validators.
    validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)

@@ -285,12 +293,12 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
    })
    require.NoError(t, err)
    epoch := primitives.Epoch(1)
    _, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, epoch)
    require.NoError(t, err, "Failed to determine CommitteeAssignments")
    assignments, err := helpers.ProposerAssignments(context.Background(), state, epoch)
    require.NoError(t, err, "Failed to determine Assignments")

    slotsWithProposers := make(map[primitives.Slot]bool)
    for _, proposerSlots := range proposerIndexToSlots {
        for _, slot := range proposerSlots {
    for _, slots := range assignments {
        for _, slot := range slots {
            slotsWithProposers[slot] = true
        }
    }

@@ -391,10 +399,10 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
    }

    for i, tt := range tests {
        ClearCache()
        helpers.ClearCache()

        require.NoError(t, state.SetSlot(tt.stateSlot))
        err := VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
        err := helpers.VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
        if tt.verificationFailure {
            assert.NotNil(t, err, "Verification succeeded when it was supposed to fail")
        } else {

@@ -404,7 +412,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
}

func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
    validators := make([]*ethpb.Validator, validatorCount)

@@ -421,20 +429,20 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
        RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
    })
    require.NoError(t, err)
    require.NoError(t, UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
    require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))

    epoch := primitives.Epoch(0)
    idx := primitives.CommitteeIndex(1)
    seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    require.NoError(t, err)

    indices, err = committeeCache.Committee(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
    indices, err = helpers.CommitteeCache().Committee(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
    require.NoError(t, err)
    assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(indices)), "Did not save correct indices lengths")
}

func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
    validators := make([]*ethpb.Validator, validatorCount)

@@ -452,19 +460,19 @@ func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
    })
    require.NoError(t, err)
    e := time.CurrentEpoch(state)
    require.NoError(t, UpdateCommitteeCache(context.Background(), state, e))
    require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e))

    seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
    seed, err := helpers.Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
    require.NoError(t, err)
    require.Equal(t, true, committeeCache.HasEntry(string(seed[:])))
    require.Equal(t, true, helpers.CommitteeCache().HasEntry(string(seed[:])))

    nextSeed, err := Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
    nextSeed, err := helpers.Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
    require.NoError(t, err)
    require.Equal(t, false, committeeCache.HasEntry(string(nextSeed[:])))
    require.Equal(t, false, helpers.CommitteeCache().HasEntry(string(nextSeed[:])))

    require.NoError(t, UpdateCommitteeCache(context.Background(), state, e+1))
    require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e+1))

    require.Equal(t, true, committeeCache.HasEntry(string(nextSeed[:])))
    require.Equal(t, true, helpers.CommitteeCache().HasEntry(string(nextSeed[:])))
}

func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {

@@ -481,20 +489,20 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
    require.NoError(b, err)

    epoch := time.CurrentEpoch(state)
    indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
    indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
    require.NoError(b, err)
    seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    require.NoError(b, err)

    index := uint64(3)
    _, err = computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
    _, err = helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
    if err != nil {
        panic(err)
    }

    b.ResetTimer()
    for n := 0; n < b.N; n++ {
        _, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        _, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        if err != nil {
            panic(err)
        }

@@ -515,20 +523,20 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
    require.NoError(b, err)

    epoch := time.CurrentEpoch(state)
    indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
    indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
    require.NoError(b, err)
    seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    require.NoError(b, err)

    index := uint64(3)
    _, err = computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
    _, err = helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
    if err != nil {
        panic(err)
    }

    b.ResetTimer()
    for n := 0; n < b.N; n++ {
        _, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        _, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        if err != nil {
            panic(err)
        }

@@ -549,9 +557,9 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
    require.NoError(b, err)

    epoch := time.CurrentEpoch(state)
    indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
    indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
    require.NoError(b, err)
    seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    require.NoError(b, err)

    i := uint64(0)

@@ -559,7 +567,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
    b.ResetTimer()
    for n := 0; n < b.N; n++ {
        i++
        _, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        _, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        if err != nil {
            panic(err)
        }

@@ -584,9 +592,9 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
    require.NoError(b, err)

    epoch := time.CurrentEpoch(state)
    indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
    indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
    require.NoError(b, err)
    seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    require.NoError(b, err)

    i := uint64(0)

@@ -594,7 +602,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
    b.ResetTimer()
    for n := 0; n < b.N; n++ {
        i++
        _, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        _, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        if err != nil {
            panic(err)
        }

@@ -619,9 +627,9 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
    require.NoError(b, err)

    epoch := time.CurrentEpoch(state)
    indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
    indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
    require.NoError(b, err)
    seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    require.NoError(b, err)

    i := uint64(0)

@@ -629,7 +637,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
    b.ResetTimer()
    for n := 0; n < b.N; n++ {
        i++
        _, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        _, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        if err != nil {
            panic(err)
        }

@@ -655,13 +663,13 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
        RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
    })
    require.NoError(t, err)
    _, err = BeaconCommitteeFromState(context.Background(), state, 1 /* previous epoch */, 0)
    _, err = helpers.BeaconCommitteeFromState(context.Background(), state, 1 /* previous epoch */, 0)
    require.NoError(t, err)

    // Verify previous epoch is cached
    seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
    seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
    require.NoError(t, err)
    activeIndices, err := committeeCache.ActiveIndices(context.Background(), seed)
    activeIndices, err := helpers.CommitteeCache().ActiveIndices(context.Background(), seed)
    require.NoError(t, err)
    assert.NotNil(t, activeIndices, "Did not cache active indices")
}

@@ -680,19 +688,19 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
    })
    require.NoError(t, err)

    indices, err := ActiveValidatorIndices(context.Background(), state, 0)
    indices, err := helpers.ActiveValidatorIndices(context.Background(), state, 0)
    require.NoError(t, err)

    proposerIndices, err := precomputeProposerIndices(state, indices, time.CurrentEpoch(state))
    proposerIndices, err := helpers.PrecomputeProposerIndices(state, indices, time.CurrentEpoch(state))
    require.NoError(t, err)

    var wantedProposerIndices []primitives.ValidatorIndex
    seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
    seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
    require.NoError(t, err)
    for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
        seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
        seedWithSlotHash := hash.Hash(seedWithSlot)
        index, err := ComputeProposerIndex(state, indices, seedWithSlotHash)
        index, err := helpers.ComputeProposerIndex(state, indices, seedWithSlotHash)
        require.NoError(t, err)
        wantedProposerIndices = append(wantedProposerIndices, index)
    }

beacon-chain/core/helpers/private_access_fuzz_noop_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
//go:build fuzz

package helpers

import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"

func CommitteeCache() *cache.FakeCommitteeCache {
    return committeeCache
}

func SyncCommitteeCache() *cache.FakeSyncCommitteeCache {
    return syncCommitteeCache
}

func ProposerIndicesCache() *cache.FakeProposerIndicesCache {
    return proposerIndicesCache
}

beacon-chain/core/helpers/private_access_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
//go:build !fuzz

package helpers

import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"

func CommitteeCache() *cache.CommitteeCache {
    return committeeCache
}

func SyncCommitteeCache() *cache.SyncCommitteeCache {
    return syncCommitteeCache
}

func ProposerIndicesCache() *cache.ProposerIndicesCache {
    return proposerIndicesCache
}
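These two files are what allows the test files in this change to move from package helpers to package helpers_test: they export accessors to the package-private caches, compiled under mutually exclusive build tags so the fuzzing build hands out its fake cache types. A minimal standalone sketch of the pattern (all names here are illustrative, not Prysm's):

//go:build !fuzz

package mypkg

type Cache struct{ entries map[string][]byte }

// committeeCache stands in for package-private state under test.
var committeeCache = &Cache{entries: map[string][]byte{}}

// CommitteeCache lets an external mypkg_test package reach the private
// cache; a sibling file guarded by //go:build fuzz returns a fake instead.
func CommitteeCache() *Cache {
    return committeeCache
}

Because both accessor files are _test.go files, the exported getters are compiled only during tests and never ship in production builds.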

@@ -1,9 +1,10 @@
package helpers
package helpers_test

import (
    "encoding/binary"
    "testing"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"

@@ -40,10 +41,10 @@ func TestRandaoMix_OK(t *testing.T) {
        },
    }
    for _, test := range tests {
        ClearCache()
        helpers.ClearCache()

        require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
        mix, err := RandaoMix(state, test.epoch)
        mix, err := helpers.RandaoMix(state, test.epoch)
        require.NoError(t, err)
        assert.DeepEqual(t, test.randaoMix, mix, "Incorrect randao mix")
    }

@@ -76,10 +77,10 @@ func TestRandaoMix_CopyOK(t *testing.T) {
        },
    }
    for _, test := range tests {
        ClearCache()
        helpers.ClearCache()

        require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
        mix, err := RandaoMix(state, test.epoch)
        mix, err := helpers.RandaoMix(state, test.epoch)
        require.NoError(t, err)
        uniqueNumber := uint64(params.BeaconConfig().EpochsPerHistoricalVector.Add(1000))
        binary.LittleEndian.PutUint64(mix, uniqueNumber)

@@ -92,7 +93,7 @@ func TestRandaoMix_CopyOK(t *testing.T) {
}

func TestGenerateSeed_OK(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
    for i := 0; i < len(randaoMixes); i++ {

@@ -107,7 +108,7 @@ func TestGenerateSeed_OK(t *testing.T) {
    })
    require.NoError(t, err)

    got, err := Seed(state, 10, params.BeaconConfig().DomainBeaconAttester)
    got, err := helpers.Seed(state, 10, params.BeaconConfig().DomainBeaconAttester)
    require.NoError(t, err)

    wanted := [32]byte{102, 82, 23, 40, 226, 79, 171, 11, 203, 23, 175, 7, 88, 202, 80,

@@ -1,4 +1,4 @@
package helpers
package helpers_test

import (
    "math"
    "testing"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
    state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
    "github.com/prysmaticlabs/prysm/v5/config/params"

@@ -14,7 +15,7 @@ import (
)

func TestTotalBalance_OK(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{
        {EffectiveBalance: 27 * 1e9}, {EffectiveBalance: 28 * 1e9},

@@ -22,19 +23,19 @@ func TestTotalBalance_OK(t *testing.T) {
    }})
    require.NoError(t, err)

    balance := TotalBalance(state, []primitives.ValidatorIndex{0, 1, 2, 3})
    balance := helpers.TotalBalance(state, []primitives.ValidatorIndex{0, 1, 2, 3})
    wanted := state.Validators()[0].EffectiveBalance + state.Validators()[1].EffectiveBalance +
        state.Validators()[2].EffectiveBalance + state.Validators()[3].EffectiveBalance
    assert.Equal(t, wanted, balance, "Incorrect TotalBalance")
}

func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{}})
    require.NoError(t, err)

    balance := TotalBalance(state, []primitives.ValidatorIndex{})
    balance := helpers.TotalBalance(state, []primitives.ValidatorIndex{})
    wanted := params.BeaconConfig().EffectiveBalanceIncrement
    assert.Equal(t, wanted, balance, "Incorrect TotalBalance")
}

@@ -51,7 +52,7 @@ func TestGetBalance_OK(t *testing.T) {
        {i: 2, b: []uint64{0, 0, 0}},
    }
    for _, test := range tests {
        ClearCache()
        helpers.ClearCache()

        state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Balances: test.b})
        require.NoError(t, err)

@@ -68,7 +69,7 @@ func TestTotalActiveBalance(t *testing.T) {
        {10000},
    }
    for _, test := range tests {
        ClearCache()
        helpers.ClearCache()

        validators := make([]*ethpb.Validator, 0)
        for i := 0; i < test.vCount; i++ {

@@ -76,7 +77,7 @@ func TestTotalActiveBalance(t *testing.T) {
        }
        state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
        require.NoError(t, err)
        bal, err := TotalActiveBalance(state)
        bal, err := helpers.TotalActiveBalance(state)
        require.NoError(t, err)
        require.Equal(t, uint64(test.vCount)*params.BeaconConfig().MaxEffectiveBalance, bal)
    }

@@ -91,7 +92,7 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
        {10000},
    }
    for _, test := range tests {
        ClearCache()
        helpers.ClearCache()

        validators := make([]*ethpb.Validator, 0)
        for i := 0; i < test.vCount; i++ {

@@ -99,7 +100,7 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
        }
        state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
        require.NoError(t, err)
        bal, err := TotalActiveBalance(state)
        bal, err := helpers.TotalActiveBalance(state)
        require.NoError(t, err)
        require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, bal)
    }

@@ -115,7 +116,7 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
        {vCount: 10000, wantCount: 10000},
    }
    for _, test := range tests {
        ClearCache()
        helpers.ClearCache()

        validators := make([]*ethpb.Validator, 0)
        for i := 0; i < test.vCount; i++ {

@@ -123,7 +124,7 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
        }
        state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
        require.NoError(t, err)
        bal, err := TotalActiveBalance(state)
        bal, err := helpers.TotalActiveBalance(state)
        require.NoError(t, err)
        require.Equal(t, uint64(test.wantCount)*params.BeaconConfig().MaxEffectiveBalance, bal)
    }

@@ -141,7 +142,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
        {i: 2, b: []uint64{27 * 1e9, 28 * 1e9, 32 * 1e9}, nb: 33 * 1e9, eb: 65 * 1e9},
    }
    for _, test := range tests {
        ClearCache()
        helpers.ClearCache()

        state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
            Validators: []*ethpb.Validator{

@@ -149,7 +150,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
            Balances: test.b,
        })
        require.NoError(t, err)
        require.NoError(t, IncreaseBalance(state, test.i, test.nb))
        require.NoError(t, helpers.IncreaseBalance(state, test.i, test.nb))
        assert.Equal(t, test.eb, state.Balances()[test.i], "Incorrect Validator balance")
    }
}

@@ -167,7 +168,7 @@ func TestDecreaseBalance_OK(t *testing.T) {
        {i: 3, b: []uint64{27 * 1e9, 28 * 1e9, 1, 28 * 1e9}, nb: 28 * 1e9, eb: 0},
    }
    for _, test := range tests {
        ClearCache()
        helpers.ClearCache()

        state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
            Validators: []*ethpb.Validator{

@@ -175,13 +176,13 @@ func TestDecreaseBalance_OK(t *testing.T) {
            Balances: test.b,
        })
        require.NoError(t, err)
        require.NoError(t, DecreaseBalance(state, test.i, test.nb))
        require.NoError(t, helpers.DecreaseBalance(state, test.i, test.nb))
        assert.Equal(t, test.eb, state.Balances()[test.i], "Incorrect Validator balance")
    }
}

func TestFinalityDelay(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
    base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}

@@ -195,25 +196,25 @@ func TestFinalityDelay(t *testing.T) {
        finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
    }
    setVal()
    d := FinalityDelay(prevEpoch, finalizedEpoch)
    d := helpers.FinalityDelay(prevEpoch, finalizedEpoch)
    w := time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
    assert.Equal(t, w, d, "Did not get wanted finality delay")

    require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 4}))
    setVal()
    d = FinalityDelay(prevEpoch, finalizedEpoch)
    d = helpers.FinalityDelay(prevEpoch, finalizedEpoch)
    w = time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
    assert.Equal(t, w, d, "Did not get wanted finality delay")

    require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 5}))
    setVal()
    d = FinalityDelay(prevEpoch, finalizedEpoch)
    d = helpers.FinalityDelay(prevEpoch, finalizedEpoch)
    w = time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
    assert.Equal(t, w, d, "Did not get wanted finality delay")
}

func TestIsInInactivityLeak(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
    base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}

@@ -227,13 +228,13 @@ func TestIsInInactivityLeak(t *testing.T) {
        finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
    }
    setVal()
    assert.Equal(t, true, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
    assert.Equal(t, true, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
    require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 4}))
    setVal()
    assert.Equal(t, true, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
    assert.Equal(t, true, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
    require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 5}))
    setVal()
    assert.Equal(t, false, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
    assert.Equal(t, false, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
}

func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {

@@ -285,7 +286,7 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
        {i: 2, b: []uint64{math.MaxUint64, math.MaxUint64, math.MaxUint64}, nb: 33 * 1e9},
    }
    for _, test := range tests {
        ClearCache()
        helpers.ClearCache()

        state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
            Validators: []*ethpb.Validator{

@@ -293,6 +294,6 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
            Balances: test.b,
        })
        require.NoError(t, err)
        require.ErrorContains(t, "addition overflows", IncreaseBalance(state, test.i, test.nb))
        require.ErrorContains(t, "addition overflows", helpers.IncreaseBalance(state, test.i, test.nb))
    }
}
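TestIncreaseBadBalance_NotOK above expects the balance helper to reject sums that would wrap around uint64. A standalone sketch of an overflow-checked addition in the same spirit (not Prysm's implementation, which lives in its own math helpers; the function name is ours):

package main

import (
    "errors"
    "fmt"
    "math"
)

// add64 returns a+b, or an error if the sum would overflow uint64.
func add64(a, b uint64) (uint64, error) {
    if a > math.MaxUint64-b {
        return 0, errors.New("addition overflows")
    }
    return a + b, nil
}

func main() {
    if _, err := add64(math.MaxUint64, 33*1e9); err != nil {
        fmt.Println(err) // addition overflows
    }
}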

@@ -26,7 +26,7 @@ var (
// 1. Checks if the public key exists in the sync committee cache
// 2. If 1 fails, checks if the public key exists in the input current sync committee object
func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.ValidatorIndex) (bool, error) {
    root, err := syncPeriodBoundaryRoot(st)
    root, err := SyncPeriodBoundaryRoot(st)
    if err != nil {
        return false, err
    }

@@ -63,7 +63,7 @@ func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.Valida
func IsNextPeriodSyncCommittee(
    st state.BeaconState, valIdx primitives.ValidatorIndex,
) (bool, error) {
    root, err := syncPeriodBoundaryRoot(st)
    root, err := SyncPeriodBoundaryRoot(st)
    if err != nil {
        return false, err
    }

@@ -90,7 +90,7 @@ func IsNextPeriodSyncCommittee(
func CurrentPeriodSyncSubcommitteeIndices(
    st state.BeaconState, valIdx primitives.ValidatorIndex,
) ([]primitives.CommitteeIndex, error) {
    root, err := syncPeriodBoundaryRoot(st)
    root, err := SyncPeriodBoundaryRoot(st)
    if err != nil {
        return nil, err
    }

@@ -124,7 +124,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
func NextPeriodSyncSubcommitteeIndices(
    st state.BeaconState, valIdx primitives.ValidatorIndex,
) ([]primitives.CommitteeIndex, error) {
    root, err := syncPeriodBoundaryRoot(st)
    root, err := SyncPeriodBoundaryRoot(st)
    if err != nil {
        return nil, err
    }

@@ -182,10 +182,10 @@ func findSubCommitteeIndices(pubKey []byte, pubKeys [][]byte) []primitives.Commi
    return indices
}

// Retrieve the current sync period boundary root by calculating sync period start epoch
// SyncPeriodBoundaryRoot computes the current sync period boundary root by calculating sync period start epoch
// and calling `BlockRoot`.
// It uses the boundary slot - 1 for block root. (Ex: SlotsPerEpoch * EpochsPerSyncCommitteePeriod - 1)
func syncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([32]byte, error) {
func SyncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([32]byte, error) {
    // Can't call `BlockRoot` until the first slot.
    if st.Slot() == params.BeaconConfig().GenesisSlot {
        return params.BeaconConfig().ZeroHash, nil
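Worked through with mainnet parameters, the "boundary slot - 1" rule in the doc comment gives slot 32*256 - 1 = 8191 as the root slot once the second sync committee period begins. A self-contained sketch of just that boundary arithmetic (constants are mainnet values; the function name is ours):

package main

import "fmt"

const (
    slotsPerEpoch                = 32  // mainnet value
    epochsPerSyncCommitteePeriod = 256 // mainnet value
)

// syncPeriodBoundarySlot returns the slot just before the sync committee
// period containing `slot` started; the genesis period has no prior slot,
// which corresponds to the ZeroHash special case above.
func syncPeriodBoundarySlot(slot uint64) uint64 {
    slotsPerPeriod := uint64(slotsPerEpoch * epochsPerSyncCommitteePeriod) // 8192
    periodStart := slot / slotsPerPeriod * slotsPerPeriod
    if periodStart == 0 {
        return 0
    }
    return periodStart - 1
}

func main() {
    fmt.Println(syncPeriodBoundarySlot(10000)) // 8191, i.e. SlotsPerEpoch*EpochsPerSyncCommitteePeriod - 1
}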

@@ -1,4 +1,4 @@
package helpers
package helpers_test

import (
    "math/rand"

@@ -7,6 +7,7 @@ import (
    "time"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"

@@ -17,7 +18,7 @@ import (
)

func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -40,15 +41,15 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    r := [32]byte{'a'}
    require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
    require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))

    ok, err := IsCurrentPeriodSyncCommittee(state, 0)
    ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 0)
    require.NoError(t, err)
    require.Equal(t, true, ok)
}

func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -70,13 +71,13 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    ok, err := IsCurrentPeriodSyncCommittee(state, 0)
    ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 0)
    require.NoError(t, err)
    require.Equal(t, true, ok)
}

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -98,13 +99,13 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
    ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 12390192)
    require.ErrorContains(t, "validator index 12390192 does not exist", err)
    require.Equal(t, false, ok)
}

func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -127,15 +128,15 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    r := [32]byte{'a'}
    require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
    require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))

    ok, err := IsNextPeriodSyncCommittee(state, 0)
    ok, err := helpers.IsNextPeriodSyncCommittee(state, 0)
    require.NoError(t, err)
    require.Equal(t, true, ok)
}

func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -157,13 +158,13 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    ok, err := IsNextPeriodSyncCommittee(state, 0)
    ok, err := helpers.IsNextPeriodSyncCommittee(state, 0)
    require.NoError(t, err)
    require.Equal(t, true, ok)
}

func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -185,13 +186,13 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    ok, err := IsNextPeriodSyncCommittee(state, 120391029)
    ok, err := helpers.IsNextPeriodSyncCommittee(state, 120391029)
    require.ErrorContains(t, "validator index 120391029 does not exist", err)
    require.Equal(t, false, ok)
}

func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -214,15 +215,15 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    r := [32]byte{'a'}
    require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
    require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))

    index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
    index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 0)
    require.NoError(t, err)
    require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}

func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -243,27 +244,27 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
    require.NoError(t, err)
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
    root, err := syncPeriodBoundaryRoot(state)
    root, err := helpers.SyncPeriodBoundaryRoot(state)
    require.NoError(t, err)

    // Test that cache was empty.
    _, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
    _, err = helpers.SyncCommitteeCache().CurrentPeriodIndexPosition(root, 0)
    require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)

    // Test that helper can retrieve the index given empty cache.
    index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
    index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 0)
    require.NoError(t, err)
    require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)

    // Test that cache was able to fill on miss.
    time.Sleep(100 * time.Millisecond)
    index, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
    index, err = helpers.SyncCommitteeCache().CurrentPeriodIndexPosition(root, 0)
    require.NoError(t, err)
    require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}
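The 100 ms sleep in the test above exists because the cache is filled asynchronously after a miss: the helper answers the caller from the slow path and schedules the cache update in the background. A generic sketch of that fill-on-miss shape (illustrative only, not Prysm's cache implementation):

package cachesketch

import "sync"

type readThrough struct {
    mu      sync.RWMutex
    entries map[string][]uint64
}

func newReadThrough() *readThrough {
    return &readThrough{entries: make(map[string][]uint64)}
}

// Get serves from the cache when possible; on a miss it computes the value
// for the caller immediately and populates the cache in the background,
// which is why a reader polling the cache may briefly still see a miss.
func (c *readThrough) Get(key string, compute func() []uint64) []uint64 {
    c.mu.RLock()
    v, ok := c.entries[key]
    c.mu.RUnlock()
    if ok {
        return v
    }
    v = compute()
    go func() {
        c.mu.Lock()
        c.entries[key] = v
        c.mu.Unlock()
    }()
    return v
}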

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -285,13 +286,13 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
    index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
    require.ErrorContains(t, "validator index 129301923 does not exist", err)
    require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}

func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -314,15 +315,15 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    r := [32]byte{'a'}
    require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
    require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))

    index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
    index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 0)
    require.NoError(t, err)
    require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}

func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -344,13 +345,13 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
    index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 0)
    require.NoError(t, err)
    require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}

func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -372,43 +373,43 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
    index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 21093019)
    require.ErrorContains(t, "validator index 21093019 does not exist", err)
    require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}

func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
        Slot: 1,
    })
    require.NoError(t, err)
    err = UpdateSyncCommitteeCache(state)
    err = helpers.UpdateSyncCommitteeCache(state)
    require.ErrorContains(t, "not at the end of the epoch to update cache", err)

    state, err = state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
        Slot: params.BeaconConfig().SlotsPerEpoch - 1,
    })
    require.NoError(t, err)
    err = UpdateSyncCommitteeCache(state)
    err = helpers.UpdateSyncCommitteeCache(state)
    require.ErrorContains(t, "not at sync committee period boundary to update cache", err)
}

func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
        Slot: primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch - 1,
        LatestBlockHeader: &ethpb.BeaconBlockHeader{StateRoot: params.BeaconConfig().ZeroHash[:]},
    })
    require.NoError(t, err)
    err = UpdateSyncCommitteeCache(state)
    err = helpers.UpdateSyncCommitteeCache(state)
    require.ErrorContains(t, "zero hash state root can't be used to update cache", err)
}

func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
    ClearCache()
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -435,7 +436,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    comIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
    comIdxs, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 200)
    require.NoError(t, err)

    wantedSlot := params.BeaconConfig().EpochsPerSyncCommitteePeriod.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))

@@ -446,7 +447,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
        syncCommittee.Pubkeys[i], syncCommittee.Pubkeys[j] = syncCommittee.Pubkeys[j], syncCommittee.Pubkeys[i]
    })
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    newIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
    newIdxs, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 200)
|
||||
require.NoError(t, err)
|
||||
require.DeepNotEqual(t, comIdxs, newIdxs)
|
||||
}
|
||||
|
||||
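The change running through these test hunks is mechanical: the file moves from the internal helpers package to the black-box helpers_test package, so package-private caches are now reached through exported accessors such as helpers.SyncCommitteeCache() and reset through helpers.ClearCache(). A minimal, self-contained sketch of that accessor pattern, with hypothetical names rather than Prysm's real types:

```go
// Package-private state exposed through accessors so an external
// (black-box) *_test package can seed, inspect, and reset it.
package cachepkg

import "sync"

type Cache struct {
	mu    sync.RWMutex
	items map[string]int
}

func (c *Cache) Get(k string) (int, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.items[k]
	return v, ok
}

func (c *Cache) Put(k string, v int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[k] = v
}

var shared = &Cache{items: map[string]int{}}

// Shared returns the package-level cache, mirroring the role of
// helpers.SyncCommitteeCache() in the diff above.
func Shared() *Cache { return shared }

// Clear resets package-level state between tests, mirroring helpers.ClearCache().
func Clear() { shared = &Cache{items: map[string]int{}} }
```

Keeping the variable unexported while exporting only the accessor preserves encapsulation but still lets the external test package drive the cache.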
@@ -1,4 +1,4 @@
package helpers
package helpers_test

import (
	"context"
@@ -6,6 +6,7 @@ import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
	forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
	state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
@@ -32,7 +33,7 @@ func TestIsActiveValidator_OK(t *testing.T) {
	}
	for _, test := range tests {
		validator := &ethpb.Validator{ActivationEpoch: 10, ExitEpoch: 100}
		assert.Equal(t, test.b, IsActiveValidator(validator, test.a), "IsActiveValidator(%d)", test.a)
		assert.Equal(t, test.b, helpers.IsActiveValidator(validator, test.a), "IsActiveValidator(%d)", test.a)
	}
}
@@ -53,7 +54,7 @@ func TestIsActiveValidatorUsingTrie_OK(t *testing.T) {
	for _, test := range tests {
		readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
		require.NoError(t, err)
		assert.Equal(t, test.b, IsActiveValidatorUsingTrie(readOnlyVal, test.a), "IsActiveValidatorUsingTrie(%d)", test.a)
		assert.Equal(t, test.b, helpers.IsActiveValidatorUsingTrie(readOnlyVal, test.a), "IsActiveValidatorUsingTrie(%d)", test.a)
	}
}
@@ -81,7 +82,7 @@ func TestIsActiveNonSlashedValidatorUsingTrie_OK(t *testing.T) {
		require.NoError(t, err)
		readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
		require.NoError(t, err)
		assert.Equal(t, test.b, IsActiveNonSlashedValidatorUsingTrie(readOnlyVal, test.a), "IsActiveNonSlashedValidatorUsingTrie(%d)", test.a)
		assert.Equal(t, test.b, helpers.IsActiveNonSlashedValidatorUsingTrie(readOnlyVal, test.a), "IsActiveNonSlashedValidatorUsingTrie(%d)", test.a)
	}
}
@@ -161,7 +162,7 @@ func TestIsSlashableValidator_OK(t *testing.T) {
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			t.Run("without trie", func(t *testing.T) {
				slashableValidator := IsSlashableValidator(test.validator.ActivationEpoch,
				slashableValidator := helpers.IsSlashableValidator(test.validator.ActivationEpoch,
					test.validator.WithdrawableEpoch, test.validator.Slashed, test.epoch)
				assert.Equal(t, test.slashable, slashableValidator, "Expected active validator slashable to be %t", test.slashable)
			})
@@ -170,7 +171,7 @@ func TestIsSlashableValidator_OK(t *testing.T) {
				require.NoError(t, err)
				readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
				require.NoError(t, err)
				slashableValidator := IsSlashableValidatorUsingTrie(readOnlyVal, test.epoch)
				slashableValidator := helpers.IsSlashableValidatorUsingTrie(readOnlyVal, test.epoch)
				assert.Equal(t, test.slashable, slashableValidator, "Expected active validator slashable to be %t", test.slashable)
			})
		})
@@ -223,17 +224,17 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
	}

	for _, tt := range tests {
		ClearCache()
		helpers.ClearCache()

		require.NoError(t, state.SetSlot(tt.slot))
		result, err := BeaconProposerIndex(context.Background(), state)
		result, err := helpers.BeaconProposerIndex(context.Background(), state)
		require.NoError(t, err, "Failed to get shard and committees at slot")
		assert.Equal(t, tt.index, result, "Result index was an unexpected value")
	}
}

func TestBeaconProposerIndex_BadState(t *testing.T) {
	ClearCache()
	helpers.ClearCache()

	params.SetupTestConfigCleanup(t)
	c := params.BeaconConfig()
@@ -261,12 +262,12 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
	// Set a very high slot, so that retrieved block root will be
	// non existent for the proposer cache.
	require.NoError(t, state.SetSlot(100))
	_, err = BeaconProposerIndex(context.Background(), state)
	_, err = helpers.BeaconProposerIndex(context.Background(), state)
	require.NoError(t, err)
}

func TestComputeProposerIndex_Compatibility(t *testing.T) {
	ClearCache()
	helpers.ClearCache()

	validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
	for i := 0; i < len(validators); i++ {
@@ -281,22 +282,22 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
	})
	require.NoError(t, err)

	indices, err := ActiveValidatorIndices(context.Background(), state, 0)
	indices, err := helpers.ActiveValidatorIndices(context.Background(), state, 0)
	require.NoError(t, err)

	var proposerIndices []primitives.ValidatorIndex
	seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
	seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
	require.NoError(t, err)
	for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
		seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
		seedWithSlotHash := hash.Hash(seedWithSlot)
		index, err := ComputeProposerIndex(state, indices, seedWithSlotHash)
		index, err := helpers.ComputeProposerIndex(state, indices, seedWithSlotHash)
		require.NoError(t, err)
		proposerIndices = append(proposerIndices, index)
	}

	var wantedProposerIndices []primitives.ValidatorIndex
	seed, err = Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
	seed, err = helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
	require.NoError(t, err)
	for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
		seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
@@ -309,15 +310,15 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
}

func TestDelayedActivationExitEpoch_OK(t *testing.T) {
	ClearCache()
	helpers.ClearCache()

	epoch := primitives.Epoch(9999)
	wanted := epoch + 1 + params.BeaconConfig().MaxSeedLookahead
	assert.Equal(t, wanted, ActivationExitEpoch(epoch))
	assert.Equal(t, wanted, helpers.ActivationExitEpoch(epoch))
}

func TestActiveValidatorCount_Genesis(t *testing.T) {
	ClearCache()
	helpers.ClearCache()

	c := 1000
	validators := make([]*ethpb.Validator, c)
@@ -334,10 +335,10 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {
	require.NoError(t, err)

	// Preset cache to a bad count.
	seed, err := Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
	seed, err := helpers.Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
	require.NoError(t, err)
	require.NoError(t, committeeCache.AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}}))
	validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
	require.NoError(t, helpers.CommitteeCache().AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}}))
	validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
	require.NoError(t, err)
	assert.Equal(t, uint64(c), validatorCount, "Did not get the correct validator count")
}
@@ -353,7 +354,7 @@ func TestChurnLimit_OK(t *testing.T) {
		{validatorCount: 2000000, wantedChurn: 30 /* validatorCount/churnLimitQuotient */},
	}
	for _, test := range tests {
		ClearCache()
		helpers.ClearCache()

		validators := make([]*ethpb.Validator, test.validatorCount)
		for i := 0; i < len(validators); i++ {
@@ -368,9 +369,9 @@ func TestChurnLimit_OK(t *testing.T) {
			RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
		})
		require.NoError(t, err)
		validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
		validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
		require.NoError(t, err)
		resultChurn := ValidatorActivationChurnLimit(validatorCount)
		resultChurn := helpers.ValidatorActivationChurnLimit(validatorCount)
		assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorActivationChurnLimit(%d)", test.validatorCount)
	}
}
@@ -386,7 +387,7 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
		{2000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
	}
	for _, test := range tests {
		ClearCache()
		helpers.ClearCache()

		// Create validators
		validators := make([]*ethpb.Validator, test.validatorCount)
@@ -405,11 +406,11 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
		require.NoError(t, err)

		// Get active validator count
		validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
		validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
		require.NoError(t, err)

		// Test churn limit calculation
		resultChurn := ValidatorActivationChurnLimitDeneb(validatorCount)
		resultChurn := helpers.ValidatorActivationChurnLimitDeneb(validatorCount)
		assert.Equal(t, test.wantedChurn, resultChurn)
	}
}
@@ -574,11 +575,11 @@ func TestActiveValidatorIndices(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ClearCache()
			helpers.ClearCache()

			s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
			require.NoError(t, err)
			got, err := ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
			got, err := helpers.ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
			if tt.wantedErr != "" {
				assert.ErrorContains(t, tt.wantedErr, err)
				return
@@ -684,12 +685,12 @@ func TestComputeProposerIndex(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ClearCache()
			helpers.ClearCache()

			bState := &ethpb.BeaconState{Validators: tt.args.validators}
			stTrie, err := state_native.InitializeFromProtoUnsafePhase0(bState)
			require.NoError(t, err)
			got, err := ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
			got, err := helpers.ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
			if tt.wantedErr != "" {
				assert.ErrorContains(t, tt.wantedErr, err)
				return
@@ -718,9 +719,9 @@ func TestIsEligibleForActivationQueue(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ClearCache()
			helpers.ClearCache()

			assert.Equal(t, tt.want, IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
			assert.Equal(t, tt.want, helpers.IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
		})
	}
}
@@ -747,11 +748,11 @@ func TestIsIsEligibleForActivation(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ClearCache()
			helpers.ClearCache()

			s, err := state_native.InitializeFromProtoPhase0(tt.state)
			require.NoError(t, err)
			assert.Equal(t, tt.want, IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
			assert.Equal(t, tt.want, helpers.IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
		})
	}
}
@@ -765,7 +766,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
	hashFunc := hash.CustomSHA256Hasher()

	for i := uint64(0); ; i++ {
		candidateIndex, err := ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
		candidateIndex, err := helpers.ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
		if err != nil {
			return 0, err
		}
@@ -787,7 +788,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
}

func TestLastActivatedValidatorIndex_OK(t *testing.T) {
	ClearCache()
	helpers.ClearCache()

	beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
	require.NoError(t, err)
@@ -806,13 +807,13 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
	require.NoError(t, beaconState.SetValidators(validators))
	require.NoError(t, beaconState.SetBalances(balances))

	index, err := LastActivatedValidatorIndex(context.Background(), beaconState)
	index, err := helpers.LastActivatedValidatorIndex(context.Background(), beaconState)
	require.NoError(t, err)
	require.Equal(t, index, primitives.ValidatorIndex(3))
}

func TestProposerIndexFromCheckpoint(t *testing.T) {
	ClearCache()
	helpers.ClearCache()

	e := primitives.Epoch(2)
	r := [32]byte{'a'}
@@ -820,10 +821,10 @@ func TestProposerIndexFromCheckpoint(t *testing.T) {
	ids := [32]primitives.ValidatorIndex{}
	slot := primitives.Slot(69) // slot 5 in the Epoch
	ids[5] = primitives.ValidatorIndex(19)
	proposerIndicesCache.Set(e, r, ids)
	helpers.ProposerIndicesCache().Set(e, r, ids)
	c := &forkchoicetypes.Checkpoint{Root: root, Epoch: e - 1}
	proposerIndicesCache.SetCheckpoint(*c, r)
	id, err := ProposerIndexAtSlotFromCheckpoint(c, slot)
	helpers.ProposerIndicesCache().SetCheckpoint(*c, r)
	id, err := helpers.ProposerIndexAtSlotFromCheckpoint(c, slot)
	require.NoError(t, err)
	require.Equal(t, ids[5], id)
}
@@ -92,7 +92,7 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
	if e.diskSummary.AllAvailable(kc.count()) {
		return nil, nil
	}
	scs := make([]blocks.ROBlob, kc.count())
	scs := make([]blocks.ROBlob, 0, kc.count())
	for i := uint64(0); i < fieldparams.MaxBlobsPerBlock; i++ {
		// We already have this blob, we don't need to write it or validate it.
		if e.diskSummary.HasIndex(i) {
@@ -111,7 +111,7 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
		if !bytes.Equal(kc[i], e.scs[i].KzgCommitment) {
			return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitment, kc[i])
		}
		scs[i] = *e.scs[i]
		scs = append(scs, *e.scs[i])
	}

	return scs, nil
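The filter fix above addresses a classic Go slice pitfall: make([]blocks.ROBlob, kc.count()) produces a length-n slice, so skipping indices that are already on disk leaves zero-value holes at the skipped positions. Switching to zero length with capacity n plus append yields a dense result. A self-contained sketch of the difference:

```go
package main

import "fmt"

func main() {
	type blob struct{ idx int }
	onDisk := map[int]bool{0: true} // index 0 already persisted, so it is skipped

	// Buggy shape: length-n slice plus sparse indexed writes leaves zero-value holes.
	buggy := make([]blob, 3)
	for i := 0; i < 3; i++ {
		if onDisk[i] {
			continue
		}
		buggy[i] = blob{idx: i}
	}
	fmt.Println(buggy) // [{0} {1} {2}]: element 0 is a zero-value hole

	// Fixed shape: zero length, capacity n, append only what survives the filter.
	fixed := make([]blob, 0, 3)
	for i := 0; i < 3; i++ {
		if onDisk[i] {
			continue
		}
		fixed = append(fixed, blob{idx: i})
	}
	fmt.Println(fixed) // [{1} {2}]: dense, no holes
}
```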
@@ -3,9 +3,14 @@ package das
import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

func TestCacheEnsureDelete(t *testing.T) {
@@ -23,3 +28,145 @@ func TestCacheEnsureDelete(t *testing.T) {
	var nilEntry *cacheEntry
	require.Equal(t, nilEntry, c.entries[k])
}

type filterTestCaseSetupFunc func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)

func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpected int) filterTestCaseSetupFunc {
	return func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
		blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, nBlobs)
		commits, err := commitmentsToCheck(blk, blk.Block().Slot())
		require.NoError(t, err)
		entry := &cacheEntry{}
		if len(onDisk) > 0 {
			od := map[[32]byte][]int{blk.Root(): onDisk}
			sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
			sum := sumz.Summary(blk.Root())
			entry.setDiskSummary(sum)
		}
		expected := make([]blocks.ROBlob, 0, nBlobs)
		for i := 0; i < commits.count(); i++ {
			if entry.diskSummary.HasIndex(uint64(i)) {
				continue
			}
			// If we aren't telling the cache a blob is on disk, add it to the expected list and stash.
			expected = append(expected, blobs[i])
			require.NoError(t, entry.stash(&blobs[i]))
		}
		require.Equal(t, numExpected, len(expected))
		return entry, commits, expected
	}
}

func TestFilterDiskSummary(t *testing.T) {
	denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
	require.NoError(t, err)
	cases := []struct {
		name  string
		setup filterTestCaseSetupFunc
	}{
		{
			name:  "full blobs, all on disk",
			setup: filterTestCaseSetup(denebSlot, 6, []int{0, 1, 2, 3, 4, 5}, 0),
		},
		{
			name:  "full blobs, first on disk",
			setup: filterTestCaseSetup(denebSlot, 6, []int{0}, 5),
		},
		{
			name:  "full blobs, middle on disk",
			setup: filterTestCaseSetup(denebSlot, 6, []int{2}, 5),
		},
		{
			name:  "full blobs, last on disk",
			setup: filterTestCaseSetup(denebSlot, 6, []int{5}, 5),
		},
		{
			name:  "full blobs, none on disk",
			setup: filterTestCaseSetup(denebSlot, 6, []int{}, 6),
		},
		{
			name:  "one commitment, on disk",
			setup: filterTestCaseSetup(denebSlot, 1, []int{0}, 0),
		},
		{
			name:  "one commitment, not on disk",
			setup: filterTestCaseSetup(denebSlot, 1, []int{}, 1),
		},
		{
			name:  "two commitments, first on disk",
			setup: filterTestCaseSetup(denebSlot, 2, []int{0}, 1),
		},
		{
			name:  "two commitments, last on disk",
			setup: filterTestCaseSetup(denebSlot, 2, []int{1}, 1),
		},
		{
			name:  "two commitments, none on disk",
			setup: filterTestCaseSetup(denebSlot, 2, []int{}, 2),
		},
		{
			name:  "two commitments, all on disk",
			setup: filterTestCaseSetup(denebSlot, 2, []int{0, 1}, 0),
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			entry, commits, expected := c.setup(t)
			// first (root) argument doesn't matter, it is just for logs
			got, err := entry.filter([32]byte{}, commits)
			require.NoError(t, err)
			require.Equal(t, len(expected), len(got))
		})
	}
}

func TestFilter(t *testing.T) {
	denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
	require.NoError(t, err)
	cases := []struct {
		name  string
		setup func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
		err   error
	}{
		{
			name: "commitments mismatch - extra sidecar",
			setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
				entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
				commits[5] = nil
				return entry, commits, expected
			},
			err: errCommitmentMismatch,
		},
		{
			name: "sidecar missing",
			setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
				entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
				entry.scs[5] = nil
				return entry, commits, expected
			},
			err: errMissingSidecar,
		},
		{
			name: "commitments mismatch - different bytes",
			setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
				entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
				entry.scs[5].KzgCommitment = []byte("nope")
				return entry, commits, expected
			},
			err: errCommitmentMismatch,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			entry, commits, expected := c.setup(t)
			// first (root) argument doesn't matter, it is just for logs
			got, err := entry.filter([32]byte{}, commits)
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
				return
			}
			require.NoError(t, err)
			require.Equal(t, len(expected), len(got))
		})
	}
}
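The new das tests parameterize fixture construction with a closure factory (filterTestCaseSetup) so each table entry builds and checks its own state before the assertion runs. A stripped-down, self-contained version of that shape, using stand-in types rather than the real das fixtures:

```go
package example

import "testing"

type fixture struct{ kept []int }

type setupFunc func(t *testing.T) fixture

// caseSetup returns a closure that builds the fixture for one table entry,
// mirroring how filterTestCaseSetup captures slot/nBlobs/onDisk.
func caseSetup(total int, onDisk map[int]bool) setupFunc {
	return func(t *testing.T) fixture {
		t.Helper()
		f := fixture{}
		for i := 0; i < total; i++ {
			if onDisk[i] {
				continue // already persisted, nothing kept in memory
			}
			f.kept = append(f.kept, i)
		}
		return f
	}
}

func TestFilterShape(t *testing.T) {
	cases := []struct {
		name    string
		setup   setupFunc
		wantLen int
	}{
		{"none on disk", caseSetup(2, nil), 2},
		{"first on disk", caseSetup(2, map[int]bool{0: true}), 1},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			got := c.setup(t)
			if len(got.kept) != c.wantLen {
				t.Fatalf("kept %d, want %d", len(got.kept), c.wantLen)
			}
		})
	}
}
```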
@@ -18,10 +18,12 @@ go_library(
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//io/file:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/logging:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -3,17 +3,20 @@ package filesystem

import (
	"context"
	"fmt"
	"math"
	"os"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v5/io/file"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/runtime/logging"
@@ -26,6 +29,7 @@ var (
	errEmptyBlobWritten    = errors.New("zero bytes written to disk when saving blob sidecar")
	errSidecarEmptySSZData = errors.New("sidecar marshalled to an empty ssz byte slice")
	errNoBasePath          = errors.New("BlobStorage base path not specified in init")
	errInvalidRootString   = errors.New("Could not parse hex string as a [32]byte")
)

const (
@@ -299,6 +303,15 @@ func (bs *BlobStorage) Clear() error {
	return nil
}

// WithinRetentionPeriod checks if the requested epoch is within the blob retention period.
func (bs *BlobStorage) WithinRetentionPeriod(requested, current primitives.Epoch) bool {
	if requested > math.MaxUint64-bs.retentionEpochs {
		// If there is an overflow, then the retention period was set to an extremely large number.
		return true
	}
	return requested+bs.retentionEpochs >= current
}

type blobNamer struct {
	root  [32]byte
	index uint64
@@ -323,3 +336,11 @@ func (p blobNamer) path() string {
func rootString(root [32]byte) string {
	return fmt.Sprintf("%#x", root)
}

func stringToRoot(str string) ([32]byte, error) {
	slice, err := hexutil.Decode(str)
	if err != nil {
		return [32]byte{}, errors.Wrapf(errInvalidRootString, "input=%s", str)
	}
	return bytesutil.ToBytes32(slice), nil
}
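The new WithinRetentionPeriod guards the sum requested+retentionEpochs against uint64 wraparound before comparing: if the retention period is so large that the addition would overflow, every epoch is treated as retained. A small stand-alone sketch of the same check, with plain uint64 in place of primitives.Epoch:

```go
package main

import (
	"fmt"
	"math"
)

// withinRetention mirrors the overflow guard in WithinRetentionPeriod:
// requested+retention would wrap for very large retention values, so the
// wrap is detected first and treated as "retain forever".
func withinRetention(requested, current, retention uint64) bool {
	if requested > math.MaxUint64-retention {
		// Addition would overflow: retention is effectively unbounded.
		return true
	}
	return requested+retention >= current
}

func main() {
	fmt.Println(withinRetention(0, 17, 16))            // false: epoch 0 has aged out
	fmt.Println(withinRetention(0, 16, 16))            // true: boundary epoch is kept
	fmt.Println(withinRetention(1, 1, math.MaxUint64)) // true: overflow guard fires
}
```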
@@ -2,6 +2,7 @@ package filesystem

import (
	"bytes"
	"math"
	"os"
	"path"
	"sync"
@@ -24,8 +25,7 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
	require.NoError(t, err)

	t.Run("no error for duplicate", func(t *testing.T) {
		fs, bs, err := NewEphemeralBlobStorageWithFs(t)
		require.NoError(t, err)
		fs, bs := NewEphemeralBlobStorageWithFs(t)
		existingSidecar := testSidecars[0]

		blobPath := namerForSidecar(existingSidecar).path()
@@ -129,8 +129,7 @@
// pollUntil polls a condition function until it returns true or a timeout is reached.

func TestBlobIndicesBounds(t *testing.T) {
	fs, bs, err := NewEphemeralBlobStorageWithFs(t)
	require.NoError(t, err)
	fs, bs := NewEphemeralBlobStorageWithFs(t)
	root := [32]byte{}

	okIdx := uint64(fieldparams.MaxBlobsPerBlock - 1)
@@ -161,8 +160,7 @@ func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, idx uint64) {

func TestBlobStoragePrune(t *testing.T) {
	currentSlot := primitives.Slot(200000)
	fs, bs, err := NewEphemeralBlobStorageWithFs(t)
	require.NoError(t, err)
	fs, bs := NewEphemeralBlobStorageWithFs(t)

	t.Run("PruneOne", func(t *testing.T) {
		_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 300, fieldparams.MaxBlobsPerBlock)
@@ -218,8 +216,7 @@ func TestBlobStoragePrune(t *testing.T) {

func BenchmarkPruning(b *testing.B) {
	var t *testing.T
	_, bs, err := NewEphemeralBlobStorageWithFs(t)
	require.NoError(t, err)
	_, bs := NewEphemeralBlobStorageWithFs(t)

	blockQty := 10000
	currentSlot := primitives.Slot(150000)
@@ -248,3 +245,50 @@ func TestNewBlobStorage(t *testing.T) {
	_, err = NewBlobStorage(WithBasePath(path.Join(t.TempDir(), "good")))
	require.NoError(t, err)
}

func TestConfig_WithinRetentionPeriod(t *testing.T) {
	retention := primitives.Epoch(16)
	storage := &BlobStorage{retentionEpochs: retention}

	cases := []struct {
		name      string
		requested primitives.Epoch
		current   primitives.Epoch
		within    bool
	}{
		{
			name:      "before",
			requested: 0,
			current:   retention + 1,
			within:    false,
		},
		{
			name:      "same",
			requested: 0,
			current:   0,
			within:    true,
		},
		{
			name:      "boundary",
			requested: 0,
			current:   retention,
			within:    true,
		},
		{
			name:      "one less",
			requested: retention - 1,
			current:   retention,
			within:    true,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			require.Equal(t, c.within, storage.WithinRetentionPeriod(c.requested, c.current))
		})
	}

	t.Run("overflow", func(t *testing.T) {
		storage := &BlobStorage{retentionEpochs: math.MaxUint64}
		require.Equal(t, true, storage.WithinRetentionPeriod(1, 1))
	})
}
@@ -48,27 +48,26 @@ type BlobStorageSummarizer interface {
type blobStorageCache struct {
	mu     sync.RWMutex
	nBlobs float64
	cache  map[string]BlobStorageSummary
	cache  map[[32]byte]BlobStorageSummary
}

var _ BlobStorageSummarizer = &blobStorageCache{}

func newBlobStorageCache() *blobStorageCache {
	return &blobStorageCache{
		cache: make(map[string]BlobStorageSummary, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
		cache: make(map[[32]byte]BlobStorageSummary, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
	}
}

// Summary returns the BlobStorageSummary for `root`. The BlobStorageSummary can be used to check for the presence of
// BlobSidecars based on Index.
func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
	k := rootString(root)
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.cache[k]
	return s.cache[root]
}

func (s *blobStorageCache) ensure(key string, slot primitives.Slot, idx uint64) error {
func (s *blobStorageCache) ensure(key [32]byte, slot primitives.Slot, idx uint64) error {
	if idx >= fieldparams.MaxBlobsPerBlock {
		return errIndexOutOfBounds
	}
@@ -84,7 +83,7 @@ func (s *blobStorageCache) ensure(key string, slot primitives.Slot, idx uint64)
	return nil
}

func (s *blobStorageCache) slot(key string) (primitives.Slot, bool) {
func (s *blobStorageCache) slot(key [32]byte) (primitives.Slot, bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	v, ok := s.cache[key]
@@ -94,7 +93,7 @@ func (s *blobStorageCache) slot(key string) (primitives.Slot, bool) {
	return v.slot, ok
}

func (s *blobStorageCache) evict(key string) {
func (s *blobStorageCache) evict(key [32]byte) {
	var deleted float64
	s.mu.Lock()
	v, ok := s.cache[key]
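Switching the blobStorageCache key from string to [32]byte drops the rootString conversion on every operation: a fixed-size byte array is a comparable type in Go, so it is a valid map key, and lookups no longer pay a hex-encoding allocation per call. A minimal sketch of the idea:

```go
package main

import "fmt"

type summary struct{ slot uint64 }

func main() {
	// A fixed-size array key is compared as a flat 32-byte value; no
	// string conversion or allocation is needed on the lookup path.
	cache := map[[32]byte]summary{}

	var root [32]byte
	copy(root[:], []byte("example-root"))

	cache[root] = summary{slot: 42}
	if s, ok := cache[root]; ok {
		fmt.Println("hit:", s.slot)
	}
}
```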
@@ -48,7 +48,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
	sc := newBlobStorageCache()
	for _, c := range cases {
		if c.expected != nil {
			key := rootString(bytesutil.ToBytes32([]byte(c.name)))
			key := bytesutil.ToBytes32([]byte(c.name))
			sc.cache[key] = BlobStorageSummary{slot: 0, mask: *c.expected}
		}
	}
@@ -21,13 +21,13 @@ func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {

// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the BlobStorage api.
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage, error) {
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage) {
	fs := afero.NewMemMapFs()
	pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
	if err != nil {
		t.Fatal("test setup issue", err)
	}
	return fs, &BlobStorage{fs: fs, pruner: pruner}, nil
	return fs, &BlobStorage{fs: fs, pruner: pruner}
}

type BlobMocker struct {
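NewEphemeralBlobStorageWithFs now reports setup failures through t.Fatal instead of returning an error, which removes a require.NoError at every call site, as the updated tests above show. A sketch of a helper written under that convention, with stand-in types (only afero is assumed, since the real helper uses it):

```go
package storagesketch

import (
	"testing"

	"github.com/spf13/afero"
)

// BlobStore is a stand-in for the real BlobStorage type.
type BlobStore struct{ fs afero.Fs }

// newEphemeral hides setup failures behind t.Fatal so callers receive two
// return values instead of three and drop the require.NoError boilerplate.
func newEphemeral(t testing.TB) (afero.Fs, *BlobStore) {
	t.Helper()
	fs := afero.NewMemMapFs()
	if err := fs.MkdirAll("blobs", 0o700); err != nil {
		t.Fatal("test setup issue", err)
	}
	return fs, &BlobStore{fs: fs}
}
```

Taking testing.TB rather than *testing.T keeps the helper usable from benchmarks as well as tests.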
@@ -66,7 +66,7 @@ func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStor
	c := newBlobStorageCache()
	for k, v := range set {
		for i := range v {
			if err := c.ensure(rootString(k), 0, uint64(v[i])); err != nil {
			if err := c.ensure(k, 0, uint64(v[i])); err != nil {
				t.Fatal(err)
			}
		}
@@ -64,7 +64,7 @@ func newBlobPruner(fs afero.Fs, retain primitives.Epoch, opts ...prunerOpt) (*bl
// notify updates the pruner's view of root->blob mappings. This allows the pruner to build a cache
// of root->slot mappings and decide when to evict old blobs based on the age of present blobs.
func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) error {
	if err := p.cache.ensure(rootString(root), latest, idx); err != nil {
	if err := p.cache.ensure(root, latest, idx); err != nil {
		return err
	}
	pruned := uint64(windowMin(latest, p.windowSize))
@@ -92,14 +92,16 @@ func windowMin(latest, offset primitives.Slot) primitives.Slot {

func (p *blobPruner) warmCache() error {
	p.Lock()
	defer p.Unlock()
	defer func() {
		if !p.warmed {
			p.warmed = true
			close(p.cacheReady)
		}
		p.Unlock()
	}()
	if err := p.prune(0); err != nil {
		return err
	}
	if !p.warmed {
		p.warmed = true
		close(p.cacheReady)
	}
	return nil
}

@@ -160,7 +162,10 @@ func shouldRetain(slot, pruneBefore primitives.Slot) bool {
}

func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) {
	root := rootFromDir(dir)
	root, err := rootFromDir(dir)
	if err != nil {
		return 0, errors.Wrapf(err, "invalid directory, could not parse subdir as root %s", dir)
	}
	slot, slotCached := p.cache.slot(root)
	// Return early if the slot is cached and doesn't need pruning.
	if slotCached && shouldRetain(slot, pruneBefore) {
@@ -218,7 +223,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
		return removed, errors.Wrapf(err, "unable to remove blob directory %s", dir)
	}

	p.cache.evict(rootFromDir(dir))
	p.cache.evict(root)
	return len(scFiles), nil
}

@@ -235,8 +240,13 @@ func idxFromPath(fname string) (uint64, error) {
	return strconv.ParseUint(parts[0], 10, 64)
}

func rootFromDir(dir string) string {
	return filepath.Base(dir) // end of the path should be the blob directory, named by hex encoding of root
func rootFromDir(dir string) ([32]byte, error) {
	subdir := filepath.Base(dir) // end of the path should be the blob directory, named by hex encoding of root
	root, err := stringToRoot(subdir)
	if err != nil {
		return root, errors.Wrapf(err, "invalid directory, could not parse subdir as root %s", dir)
	}
	return root, nil
}

// Read slot from marshaled BlobSidecar data in the given file. See slotFromBlob for details.
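The warmCache change moves the once-only close of cacheReady into the deferred function, so the channel is closed even when prune returns an error; previously an early error return skipped the close and could leave waiters blocked forever. A self-contained sketch of the pattern:

```go
package main

import (
	"errors"
	"fmt"
)

// warmer sketches the warmCache shape: a "cache ready" channel that must be
// closed exactly once, on both the error and success paths, so waiters are
// never stranded.
type warmer struct {
	warmed     bool
	cacheReady chan struct{}
}

func (w *warmer) warm(fail bool) error {
	defer func() {
		if !w.warmed {
			w.warmed = true
			close(w.cacheReady) // runs even when warm returns an error
		}
	}()
	if fail {
		return errors.New("prune failed")
	}
	return nil
}

func main() {
	w := &warmer{cacheReady: make(chan struct{})}
	fmt.Println(w.warm(true)) // prune failed
	<-w.cacheReady            // does not block: channel was closed despite the error
}
```

Guarding the close with the warmed flag keeps the function safe to call more than once, since closing an already-closed channel panics.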
@@ -2,16 +2,19 @@ package filesystem

import (
	"bytes"
	"context"
	"fmt"
	"math"
	"os"
	"path"
	"sort"
	"testing"
	"time"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"github.com/spf13/afero"
@@ -25,15 +28,43 @@ func TestTryPruneDir_CachedNotExpired(t *testing.T) {
	_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, fieldparams.MaxBlobsPerBlock)
	sc, err := verification.BlobSidecarNoop(sidecars[0])
	require.NoError(t, err)
	root := fmt.Sprintf("%#x", sc.BlockRoot())
	rootStr := rootString(sc.BlockRoot())
	// This slot is right on the edge of what would need to be pruned, so by adding it to the cache and
	// skipping any other test setup, we can be certain the hot cache path never touches the filesystem.
	require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
	pruned, err := pr.tryPruneDir(root, pr.windowSize)
	require.NoError(t, pr.cache.ensure(sc.BlockRoot(), sc.Slot(), 0))
	pruned, err := pr.tryPruneDir(rootStr, pr.windowSize)
	require.NoError(t, err)
	require.Equal(t, 0, pruned)
}

func TestCacheWarmFail(t *testing.T) {
	fs := afero.NewMemMapFs()
	n := blobNamer{root: bytesutil.ToBytes32([]byte("derp")), index: 0}
	bp := n.path()
	mkdir := path.Dir(bp)
	require.NoError(t, fs.MkdirAll(mkdir, directoryPermissions))

	// Create an empty blob index in the fs by touching the file at a seemingly valid path.
	fi, err := fs.Create(bp)
	require.NoError(t, err)
	require.NoError(t, fi.Close())

	// Cache warm should fail due to the unexpected EOF.
	pr, err := newBlobPruner(fs, 0)
	require.NoError(t, err)
	require.ErrorIs(t, pr.warmCache(), errPruningFailures)

	// The cache warm has finished, so calling waitForCache with a super short deadline
	// should not block or hit the context deadline.
	ctx := context.Background()
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(1*time.Millisecond))
	defer cancel()
	c, err := pr.waitForCache(ctx)
	// We will get an error and a nil value for the cache if we hit the deadline.
	require.NoError(t, err)
	require.NotNil(t, c)
}

func TestTryPruneDir_CachedExpired(t *testing.T) {
	t.Run("empty directory", func(t *testing.T) {
		fs := afero.NewMemMapFs()
@@ -43,16 +74,15 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
		_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
		sc, err := verification.BlobSidecarNoop(sidecars[0])
		require.NoError(t, err)
		root := fmt.Sprintf("%#x", sc.BlockRoot())
		require.NoError(t, fs.Mkdir(root, directoryPermissions)) // make empty directory
		require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
		pruned, err := pr.tryPruneDir(root, slot+1)
		rootStr := rootString(sc.BlockRoot())
		require.NoError(t, fs.Mkdir(rootStr, directoryPermissions)) // make empty directory
		require.NoError(t, pr.cache.ensure(sc.BlockRoot(), sc.Slot(), 0))
		pruned, err := pr.tryPruneDir(rootStr, slot+1)
		require.NoError(t, err)
		require.Equal(t, 0, pruned)
	})
	t.Run("blobs to delete", func(t *testing.T) {
		fs, bs, err := NewEphemeralBlobStorageWithFs(t)
		require.NoError(t, err)
		fs, bs := NewEphemeralBlobStorageWithFs(t)
		var slot primitives.Slot = 0
		_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
		scs, err := verification.BlobSidecarSliceNoop(sidecars)
@@ -62,20 +92,21 @@
		require.NoError(t, bs.Save(scs[1]))

		// check that the root->slot is cached
		root := fmt.Sprintf("%#x", scs[0].BlockRoot())
		cs, cok := bs.pruner.cache.slot(root)
		root := scs[0].BlockRoot()
		rootStr := rootString(root)
		cs, cok := bs.pruner.cache.slot(scs[0].BlockRoot())
		require.Equal(t, true, cok)
		require.Equal(t, slot, cs)

		// ensure that we see the saved files in the filesystem
		files, err := listDir(fs, root)
		files, err := listDir(fs, rootStr)
		require.NoError(t, err)
		require.Equal(t, 2, len(files))

		pruned, err := bs.pruner.tryPruneDir(root, slot+1)
		pruned, err := bs.pruner.tryPruneDir(rootStr, slot+1)
		require.NoError(t, err)
		require.Equal(t, 2, pruned)
		files, err = listDir(fs, root)
		files, err = listDir(fs, rootStr)
		require.ErrorIs(t, err, os.ErrNotExist)
		require.Equal(t, 0, len(files))
	})
@@ -83,8 +114,7 @@

func TestTryPruneDir_SlotFromFile(t *testing.T) {
	t.Run("expired blobs deleted", func(t *testing.T) {
		fs, bs, err := NewEphemeralBlobStorageWithFs(t)
		require.NoError(t, err)
		fs, bs := NewEphemeralBlobStorageWithFs(t)
		var slot primitives.Slot = 0
		_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
		scs, err := verification.BlobSidecarSliceNoop(sidecars)
@@ -94,7 +124,8 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
		require.NoError(t, bs.Save(scs[1]))

		// check that the root->slot is cached
		root := fmt.Sprintf("%#x", scs[0].BlockRoot())
		root := scs[0].BlockRoot()
		rootStr := rootString(root)
		cs, ok := bs.pruner.cache.slot(root)
		require.Equal(t, true, ok)
		require.Equal(t, slot, cs)
@@ -104,20 +135,19 @@
		require.Equal(t, false, ok)

		// ensure that we see the saved files in the filesystem
		files, err := listDir(fs, root)
		files, err := listDir(fs, rootStr)
		require.NoError(t, err)
		require.Equal(t, 2, len(files))

		pruned, err := bs.pruner.tryPruneDir(root, slot+1)
		pruned, err := bs.pruner.tryPruneDir(rootStr, slot+1)
		require.NoError(t, err)
		require.Equal(t, 2, pruned)
		files, err = listDir(fs, root)
		files, err = listDir(fs, rootStr)
		require.ErrorIs(t, err, os.ErrNotExist)
		require.Equal(t, 0, len(files))
	})
	t.Run("not expired, intact", func(t *testing.T) {
		fs, bs, err := NewEphemeralBlobStorageWithFs(t)
		require.NoError(t, err)
		fs, bs := NewEphemeralBlobStorageWithFs(t)
		// Set slot equal to the window size, so it should be retained.
		slot := bs.pruner.windowSize
		_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
@@ -128,24 +158,25 @@
		require.NoError(t, bs.Save(scs[1]))

		// Evict slot mapping from the cache so that we trigger the file read path.
		root := fmt.Sprintf("%#x", scs[0].BlockRoot())
		root := scs[0].BlockRoot()
		rootStr := rootString(root)
		bs.pruner.cache.evict(root)
		_, ok := bs.pruner.cache.slot(root)
		require.Equal(t, false, ok)

		// Ensure that we see the saved files in the filesystem.
		files, err := listDir(fs, rootStr)
		require.NoError(t, err)
		require.Equal(t, 2, len(files))

		// This should use the slotFromFile code (simulating restart).
		// Setting pruneBefore == slot, so that the slot will be outside the window (at the boundary).
		pruned, err := bs.pruner.tryPruneDir(root, slot)
		pruned, err := bs.pruner.tryPruneDir(rootStr, slot)
		require.NoError(t, err)
		require.Equal(t, 0, pruned)

		// Ensure files are still present.
		files, err = listDir(fs, root)
		files, err = listDir(fs, rootStr)
		require.NoError(t, err)
		require.Equal(t, 2, len(files))
	})
@@ -184,8 +215,7 @@ func TestSlotFromFile(t *testing.T) {
	}
	for _, c := range cases {
		t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
			fs, bs, err := NewEphemeralBlobStorageWithFs(t)
			require.NoError(t, err)
			fs, bs := NewEphemeralBlobStorageWithFs(t)
			_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
			sc, err := verification.BlobSidecarNoop(sidecars[0])
			require.NoError(t, err)
@@ -320,3 +350,45 @@ func TestListDir(t *testing.T) {
		})
	}
}

func TestRootFromDir(t *testing.T) {
	cases := []struct {
		name string
		dir  string
		err  error
		root [32]byte
	}{
		{
			name: "happy path",
			dir:  "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
			root: [32]byte{255, 255, 135, 94, 29, 152, 92, 92, 203, 33, 72, 148, 152, 63, 36, 40,
				237, 178, 113, 240, 248, 123, 104, 186, 112, 16, 228, 169, 157, 243, 181, 203},
		},
		{
			name: "too short",
			dir:  "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5c",
			err:  errInvalidRootString,
		},
		{
			name: "too log",
			dir:  "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cbb",
			err:  errInvalidRootString,
		},
		{
			name: "missing prefix",
			dir:  "ffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
			err:  errInvalidRootString,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			root, err := stringToRoot(c.dir)
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
				return
			}
			require.NoError(t, err)
			require.Equal(t, c.root, root)
		})
	}
}
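stringToRoot and TestRootFromDir round-trip a 32-byte root through its 0x-prefixed hex directory name; hexutil.Decode rejects inputs without the 0x prefix or with the wrong length, which is exactly what the failing cases exercise. A sketch of the round trip, assuming only go-ethereum's hexutil package:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	root := [32]byte{0xff, 0xff, 0x87, 0x5e}
	// %#x on a byte array prints "0x" followed by the hex of every byte,
	// matching the blob directory naming convention.
	dir := fmt.Sprintf("%#x", root)

	// hexutil.Decode errors on a missing 0x prefix or odd-length input,
	// which covers the "missing prefix" and "too short" cases above.
	b, err := hexutil.Decode(dir)
	if err != nil || len(b) != 32 {
		fmt.Println("invalid root string:", err)
		return
	}
	var back [32]byte
	copy(back[:], b)
	fmt.Println(back == root) // true: lossless round trip
}
```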
@@ -36,7 +36,6 @@ go_library(
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
@@ -94,7 +93,7 @@ go_test(
    embed = [":go_default_library"],
    deps = [
        "//async/event:go_default_library",
        "//beacon-chain/cache/depositcache:go_default_library",
        "//beacon-chain/cache/depositsnapshot:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
@@ -20,9 +20,7 @@ import (
	coreState "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/types"
	statenative "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
	"github.com/prysmaticlabs/prysm/v5/config/features"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/container/trie"
	contracts "github.com/prysmaticlabs/prysm/v5/contracts/deposit"
	"github.com/prysmaticlabs/prysm/v5/crypto/hash"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -226,16 +224,14 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog *gethtypes.L
			"merkleTreeIndex": index,
		}).Info("Invalid deposit registered in deposit contract")
	}
	if features.Get().EnableEIP4881 {
		// We finalize the trie here so that old deposits are not kept around, as they make
		// deposit tree htr computation expensive.
		dTrie, ok := s.depositTrie.(*depositsnapshot.DepositTree)
		if !ok {
			return errors.Errorf("wrong trie type initialized: %T", dTrie)
		}
		if err := dTrie.Finalize(index, depositLog.BlockHash, depositLog.BlockNumber); err != nil {
			log.WithError(err).Error("Could not finalize trie")
		}
	// We finalize the trie here so that old deposits are not kept around, as they make
	// deposit tree htr computation expensive.
	dTrie, ok := s.depositTrie.(*depositsnapshot.DepositTree)
	if !ok {
		return errors.Errorf("wrong trie type initialized: %T", dTrie)
	}
	if err := dTrie.Finalize(index, depositLog.BlockHash, depositLog.BlockNumber); err != nil {
		log.WithError(err).Error("Could not finalize trie")
	}

	return nil
@@ -579,25 +575,17 @@ func (s *Service) savePowchainData(ctx context.Context) error {
		BeaconState:       pbState, // I promise not to mutate it!
		DepositContainers: s.cfg.depositCache.AllDepositContainers(ctx),
	}
	if features.Get().EnableEIP4881 {
		fd, err := s.cfg.depositCache.FinalizedDeposits(ctx)
		if err != nil {
			return errors.Errorf("could not get finalized deposit tree: %v", err)
		}
		tree, ok := fd.Deposits().(*depositsnapshot.DepositTree)
		if !ok {
			return errors.New("deposit tree was not EIP4881 DepositTree")
		}
		eth1Data.DepositSnapshot, err = tree.ToProto()
		if err != nil {
			return err
		}
	} else {
		tree, ok := s.depositTrie.(*trie.SparseMerkleTrie)
		if !ok {
			return errors.New("deposit tree was not SparseMerkleTrie")
		}
		eth1Data.Trie = tree.ToProto()
	fd, err := s.cfg.depositCache.FinalizedDeposits(ctx)
	if err != nil {
		return errors.Errorf("could not get finalized deposit tree: %v", err)
	}
	tree, ok := fd.Deposits().(*depositsnapshot.DepositTree)
	if !ok {
		return errors.New("deposit tree was not EIP4881 DepositTree")
	}
	eth1Data.DepositSnapshot, err = tree.ToProto()
	if err != nil {
		return err
	}
	return s.cfg.beaconDB.SaveExecutionChainData(ctx, eth1Data)
}
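With the EnableEIP4881 feature flag retired, the finalize path always type-asserts the concrete snapshot tree before calling snapshot-only methods, returning an error instead of panicking on a mismatch. A schematic version of that guard with stand-in types, not Prysm's real interfaces:

```go
package main

import "fmt"

// merkleTree is the behavior shared by all trie implementations.
type merkleTree interface{ NumOfItems() int }

type depositTree struct{ items int }

func (d *depositTree) NumOfItems() int { return d.items }

// Finalize exists only on the concrete snapshot type, not on the interface.
func (d *depositTree) Finalize() error { return nil }

// finalize asserts the concrete type first; a failed assertion becomes a
// reported error rather than a runtime panic.
func finalize(t merkleTree) error {
	dt, ok := t.(*depositTree)
	if !ok {
		return fmt.Errorf("wrong trie type initialized: %T", t)
	}
	return dt.Finalize()
}

func main() {
	var t merkleTree = &depositTree{}
	fmt.Println(finalize(t)) // <nil>
}
```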
@@ -9,7 +9,7 @@ import (

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
@@ -31,7 +31,7 @@ func TestProcessDepositLog_OK(t *testing.T) {
	require.NoError(t, err, "Unable to set up simulated backend")

	beaconDB := testDB.SetupDB(t)
	depositCache, err := depositcache.New()
	depositCache, err := depositsnapshot.New()
	require.NoError(t, err)

	server, endpoint, err := mockExecution.SetupRPCServer()
@@ -100,7 +100,7 @@ func TestProcessDepositLog_InsertsPendingDeposit(t *testing.T) {
	testAcc, err := mock.Setup()
	require.NoError(t, err, "Unable to set up simulated backend")
	beaconDB := testDB.SetupDB(t)
	depositCache, err := depositcache.New()
	depositCache, err := depositsnapshot.New()
	require.NoError(t, err)
	server, endpoint, err := mockExecution.SetupRPCServer()
	require.NoError(t, err)
@@ -216,7 +216,7 @@ func TestProcessETH2GenesisLog_8DuplicatePubkeys(t *testing.T) {
	testAcc, err := mock.Setup()
	require.NoError(t, err, "Unable to set up simulated backend")
	beaconDB := testDB.SetupDB(t)
	depositCache, err := depositcache.New()
	depositCache, err := depositsnapshot.New()
	require.NoError(t, err)
	server, endpoint, err := mockExecution.SetupRPCServer()
	require.NoError(t, err)
@@ -291,7 +291,7 @@ func TestProcessETH2GenesisLog(t *testing.T) {
	testAcc, err := mock.Setup()
	require.NoError(t, err, "Unable to set up simulated backend")
	beaconDB := testDB.SetupDB(t)
	depositCache, err := depositcache.New()
	depositCache, err := depositsnapshot.New()
	require.NoError(t, err)

	server, endpoint, err := mockExecution.SetupRPCServer()
@@ -384,7 +384,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
	testAcc, err := mock.Setup()
	require.NoError(t, err, "Unable to set up simulated backend")
	kvStore := testDB.SetupDB(t)
	depositCache, err := depositcache.New()
	depositCache, err := depositsnapshot.New()
	require.NoError(t, err)
	server, endpoint, err := mockExecution.SetupRPCServer()
	require.NoError(t, err)
@@ -481,7 +481,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
	testAcc, err := mock.Setup()
	require.NoError(t, err, "Unable to set up simulated backend")
	kvStore := testDB.SetupDB(t)
	depositCache, err := depositcache.New()
	depositCache, err := depositsnapshot.New()
	require.NoError(t, err)
	server, endpoint, err := mockExecution.SetupRPCServer()
	require.NoError(t, err)
@@ -593,7 +593,7 @@ func TestCheckForChainstart_NoValidator(t *testing.T) {
}

func newPowchainService(t *testing.T, eth1Backend *mock.TestAccount, beaconDB db.Database) *Service {
	depositCache, err := depositcache.New()
	depositCache, err := depositsnapshot.New()
	require.NoError(t, err)
	server, endpoint, err := mockExecution.SetupRPCServer()
	require.NoError(t, err)
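These test hunks swap depositcache.New() for depositsnapshot.New() wholesale, which works because the call sites depend only on behavior the two caches share. A schematic interface-based sketch of why such a substitution is safe, with stand-in names rather than Prysm's real API:

```go
package main

import "fmt"

// depositStore captures the subset of behavior the tests actually use;
// both cache implementations would satisfy it.
type depositStore interface {
	AllDepositContainers() []int64
}

type snapshotStore struct{ indices []int64 }

func (s *snapshotStore) AllDepositContainers() []int64 { return s.indices }

// newStore mirrors the test setup helper: construct whichever implementation
// the suite standardizes on and hand it to code typed on the interface.
func newStore() depositStore { return &snapshotStore{indices: []int64{0, 1}} }

func main() {
	fmt.Println(len(newStore().AllDepositContainers())) // 2
}
```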
@@ -29,7 +29,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
 	native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
-	"github.com/prysmaticlabs/prysm/v5/config/features"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
 	"github.com/prysmaticlabs/prysm/v5/container/trie"
 	contracts "github.com/prysmaticlabs/prysm/v5/contracts/deposit"
@@ -164,14 +163,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
 	_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop()
-	var depositTrie cache.MerkleTree
-	var err error
-	if features.Get().EnableEIP4881 {
-		depositTrie = depositsnapshot.NewDepositTree()
-	} else {
-		depositTrie, err = trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
-		if err != nil {
-			return nil, errors.Wrap(err, "could not set up deposit trie")
-		}
-	}
+	depositTrie := depositsnapshot.NewDepositTree()
 	genState, err := transition.EmptyGenesisState()
 	if err != nil {
 		return nil, errors.Wrap(err, "could not set up genesis state")
@@ -740,20 +732,12 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
 		return nil
 	}
 	var err error
-	if features.Get().EnableEIP4881 {
-		if eth1DataInDB.DepositSnapshot != nil {
-			s.depositTrie, err = depositsnapshot.DepositTreeFromSnapshotProto(eth1DataInDB.DepositSnapshot)
-		} else {
-			if err := s.migrateOldDepositTree(eth1DataInDB); err != nil {
-				return err
-			}
-		}
+	if eth1DataInDB.DepositSnapshot != nil {
+		s.depositTrie, err = depositsnapshot.DepositTreeFromSnapshotProto(eth1DataInDB.DepositSnapshot)
 	} else {
-		if eth1DataInDB.Trie == nil && eth1DataInDB.DepositSnapshot != nil {
-			return errors.Errorf("trying to use old deposit trie after migration to the new trie. "+
-				"Remove the --%s flag to resume normal operations.", features.DisableEIP4881.Name)
+		if err = s.migrateOldDepositTree(eth1DataInDB); err != nil {
+			return err
 		}
-		s.depositTrie, err = trie.CreateTrieFromProto(eth1DataInDB.Trie)
 	}
 	if err != nil {
 		return err
@@ -766,21 +750,19 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
 		}
 	}
 	s.latestEth1Data = eth1DataInDB.CurrentEth1Data
-	if features.Get().EnableEIP4881 {
-		ctrs := eth1DataInDB.DepositContainers
-		// Look at previously finalized index, as we are building off a finalized
-		// snapshot rather than the full trie.
-		lastFinalizedIndex := int64(s.depositTrie.NumOfItems() - 1)
-		// Correctly initialize missing deposits into active trie.
-		for _, c := range ctrs {
-			if c.Index > lastFinalizedIndex {
-				depRoot, err := c.Deposit.Data.HashTreeRoot()
-				if err != nil {
-					return err
-				}
-				if err := s.depositTrie.Insert(depRoot[:], int(c.Index)); err != nil {
-					return err
-				}
+	ctrs := eth1DataInDB.DepositContainers
+	// Look at previously finalized index, as we are building off a finalized
+	// snapshot rather than the full trie.
+	lastFinalizedIndex := int64(s.depositTrie.NumOfItems() - 1)
+	// Correctly initialize missing deposits into active trie.
+	for _, c := range ctrs {
+		if c.Index > lastFinalizedIndex {
+			depRoot, err := c.Deposit.Data.HashTreeRoot()
+			if err != nil {
+				return err
+			}
+			if err := s.depositTrie.Insert(depRoot[:], int(c.Index)); err != nil {
+				return err
 			}
 		}
 	}
@@ -847,21 +829,13 @@ func (s *Service) validPowchainData(ctx context.Context) (*ethpb.ETH1ChainData,
 		BeaconState:       pbState,
 		DepositContainers: s.cfg.depositCache.AllDepositContainers(ctx),
 	}
-	if features.Get().EnableEIP4881 {
-		trie, ok := s.depositTrie.(*depositsnapshot.DepositTree)
-		if !ok {
-			return nil, errors.New("deposit trie was not EIP4881 DepositTree")
-		}
-		eth1Data.DepositSnapshot, err = trie.ToProto()
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		trie, ok := s.depositTrie.(*trie.SparseMerkleTrie)
-		if !ok {
-			return nil, errors.New("deposit trie was not SparseMerkleTrie")
-		}
-		eth1Data.Trie = trie.ToProto()
+	trie, ok := s.depositTrie.(*depositsnapshot.DepositTree)
+	if !ok {
+		return nil, errors.New("deposit trie was not EIP4881 DepositTree")
+	}
+	eth1Data.DepositSnapshot, err = trie.ToProto()
+	if err != nil {
+		return nil, err
 	}
 	if err := s.cfg.beaconDB.SaveExecutionChainData(ctx, eth1Data); err != nil {
 		return nil, err

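The service hunks above complete the EIP-4881 cut-over: the snapshot-backed `DepositTree` is now the only deposit trie implementation, so every `features.Get().EnableEIP4881` branch disappears. A condensed sketch of the resulting startup path, using only the methods visible in the diff (the wrapper function itself is illustrative, not the actual Prysm function):

```go
// Illustrative wrapper over the startup flow shown above: restore the
// EIP-4881 deposit tree from a persisted snapshot when one exists, otherwise
// run the one-time migration of the legacy trie, then replay any deposits
// newer than the snapshot's finalized index.
func restoreDepositTree(s *Service, data *ethpb.ETH1ChainData) error {
	var err error
	if data.DepositSnapshot != nil {
		s.depositTrie, err = depositsnapshot.DepositTreeFromSnapshotProto(data.DepositSnapshot)
	} else {
		err = s.migrateOldDepositTree(data)
	}
	if err != nil {
		return err
	}
	// The snapshot only covers finalized deposits; re-insert the rest.
	lastFinalized := int64(s.depositTrie.NumOfItems() - 1)
	for _, c := range data.DepositContainers {
		if c.Index <= lastFinalized {
			continue
		}
		root, err := c.Deposit.Data.HashTreeRoot()
		if err != nil {
			return err
		}
		if err := s.depositTrie.Insert(root[:], int(c.Index)); err != nil {
			return err
		}
	}
	return nil
}
```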
@@ -15,7 +15,7 @@ import (
 	"github.com/ethereum/go-ethereum/rpc"
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v5/async/event"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
 	dbutil "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
 	mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/types"
@@ -348,7 +348,7 @@ func TestInitDepositCache_OK(t *testing.T) {
 		cfg: &config{beaconDB: beaconDB},
 	}
 	var err error
-	s.cfg.depositCache, err = depositcache.New()
+	s.cfg.depositCache, err = depositsnapshot.New()
 	require.NoError(t, err)
 	require.NoError(t, s.initDepositCaches(context.Background(), ctrs))

@@ -409,7 +409,7 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
 		cfg: &config{beaconDB: beaconDB},
 	}
 	var err error
-	s.cfg.depositCache, err = depositcache.New()
+	s.cfg.depositCache, err = depositsnapshot.New()
 	require.NoError(t, err)
 	require.NoError(t, s.initDepositCaches(context.Background(), ctrs))

@@ -553,7 +553,7 @@ func Test_batchRequestHeaders_UnderflowChecks(t *testing.T) {

 func TestService_EnsureConsistentPowchainData(t *testing.T) {
 	beaconDB := dbutil.SetupDB(t)
-	cache, err := depositcache.New()
+	cache, err := depositsnapshot.New()
 	require.NoError(t, err)
 	srv, endpoint, err := mockExecution.SetupRPCServer()
 	require.NoError(t, err)
@@ -583,7 +583,7 @@ func TestService_EnsureConsistentPowchainData(t *testing.T) {

 func TestService_InitializeCorrectly(t *testing.T) {
 	beaconDB := dbutil.SetupDB(t)
-	cache, err := depositcache.New()
+	cache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	srv, endpoint, err := mockExecution.SetupRPCServer()
@@ -614,7 +614,7 @@ func TestService_InitializeCorrectly(t *testing.T) {

 func TestService_EnsureValidPowchainData(t *testing.T) {
 	beaconDB := dbutil.SetupDB(t)
-	cache, err := depositcache.New()
+	cache, err := depositsnapshot.New()
 	require.NoError(t, err)
 	srv, endpoint, err := mockExecution.SetupRPCServer()
 	require.NoError(t, err)
@@ -809,7 +809,7 @@ func (s *slowRPCClient) CallContext(_ context.Context, _ interface{}, _ string,

 func TestService_migrateOldDepositTree(t *testing.T) {
 	beaconDB := dbutil.SetupDB(t)
-	cache, err := depositcache.New()
+	cache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	srv, endpoint, err := mockExecution.SetupRPCServer()

@@ -21,7 +21,6 @@ go_library(
     "//beacon-chain/blockchain:go_default_library",
     "//beacon-chain/builder:go_default_library",
     "//beacon-chain/cache:go_default_library",
-    "//beacon-chain/cache/depositcache:go_default_library",
     "//beacon-chain/cache/depositsnapshot:go_default_library",
     "//beacon-chain/db:go_default_library",
     "//beacon-chain/db/filesystem:go_default_library",

@@ -25,7 +25,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
@@ -567,11 +566,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {

 	b.db = d

-	if features.Get().EnableEIP4881 {
-		depositCache, err = depositsnapshot.New()
-	} else {
-		depositCache, err = depositcache.New()
-	}
+	depositCache, err = depositsnapshot.New()
 	if err != nil {
 		return errors.Wrap(err, "could not create deposit cache")
 	}
@@ -643,9 +638,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
 	if err := d.ClearDB(); err != nil {
 		return errors.Wrap(err, "could not clear database")
 	}
-	if err := b.BlobStorage.Clear(); err != nil {
-		return errors.Wrap(err, "could not clear blob storage")
-	}

 	d, err = slasherkv.NewKVStore(b.ctx, dbPath)
 	if err != nil {
 		return errors.Wrap(err, "could not create new database")

@@ -142,7 +142,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
 		log.WithFields(logrus.Fields{
 			"attestationSlot": att.Data.Slot,
 			"currentSlot":     currSlot,
-		}).WithError(err).Warning("Attestation is too old to broadcast, discarding it")
+		}).WithError(err).Debug("Attestation is too old to broadcast, discarding it")
 		return
 	}

@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"crypto/ecdsa"
 	"net"
+	"sync"
 	"time"

 	"github.com/ethereum/go-ethereum/p2p/discover"
@@ -15,6 +16,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/go-bitfield"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
+	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
 	"github.com/prysmaticlabs/prysm/v5/config/features"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
 	ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
@@ -106,7 +108,7 @@ func (s *Service) RefreshENR() {

 // listen for new nodes watches for new nodes in the network and adds them to the peerstore.
 func (s *Service) listenForNewNodes() {
-	iterator := enode.Filter(s.dv5Listener.RandomNodes(), s.filterPeer)
+	iterator := filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
 	defer iterator.Close()

 	for {
@@ -122,29 +124,41 @@ func (s *Service) listenForNewNodes() {
 			time.Sleep(pollingPeriod)
 			continue
 		}

-		if exists := iterator.Next(); !exists {
-			break
-		}
-
-		node := iterator.Node()
-		peerInfo, _, err := convertToAddrInfo(node)
-		if err != nil {
-			log.WithError(err).Error("Could not convert to peer info")
+		wantedCount := s.wantedPeerDials()
+		if wantedCount == 0 {
+			log.Trace("Not looking for peers, at peer limit")
+			time.Sleep(pollingPeriod)
 			continue
 		}

-		if peerInfo == nil {
-			continue
+		// Restrict dials if limit is applied.
+		if flags.MaxDialIsActive() {
+			wantedCount = min(wantedCount, flags.Get().MaxConcurrentDials)
 		}

-		// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
-		s.Peers().RandomizeBackOff(peerInfo.ID)
-		go func(info *peer.AddrInfo) {
-			if err := s.connectWithPeer(s.ctx, *info); err != nil {
-				log.WithError(err).Tracef("Could not connect with peer %s", info.String())
+		wantedNodes := enode.ReadNodes(iterator, wantedCount)
+		wg := new(sync.WaitGroup)
+		for i := 0; i < len(wantedNodes); i++ {
+			node := wantedNodes[i]
+			peerInfo, _, err := convertToAddrInfo(node)
+			if err != nil {
+				log.WithError(err).Error("Could not convert to peer info")
+				continue
 			}
-		}(peerInfo)
+
+			if peerInfo == nil {
+				continue
+			}
+
+			// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
+			s.Peers().RandomizeBackOff(peerInfo.ID)
+			wg.Add(1)
+			go func(info *peer.AddrInfo) {
+				if err := s.connectWithPeer(s.ctx, *info); err != nil {
+					log.WithError(err).Tracef("Could not connect with peer %s", info.String())
+				}
+				wg.Done()
+			}(peerInfo)
+		}
+		wg.Wait()
 	}
 }

@@ -384,6 +398,17 @@ func (s *Service) isPeerAtLimit(inbound bool) bool {
 	return activePeers >= maxPeers || numOfConns >= maxPeers
 }

+func (s *Service) wantedPeerDials() int {
+	maxPeers := int(s.cfg.MaxPeers)
+
+	activePeers := len(s.Peers().Active())
+	wantedCount := 0
+	if maxPeers > activePeers {
+		wantedCount = maxPeers - activePeers
+	}
+	return wantedCount
+}
+
 // PeersFromStringAddrs converts peer raw ENRs into multiaddrs for p2p.
 func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
 	var allAddrs []ma.Multiaddr

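The dial budget introduced above is easier to see in isolation: each discovery round reads at most as many nodes as there are free peer slots, optionally capped by the max-concurrent-dials flag, and the `WaitGroup` keeps a round from overlapping the next. A condensed sketch of the loop body, using only the accessors shown in the diff:

```go
// Sketch of the per-round dial budget (values illustrative).
wanted := s.wantedPeerDials() // maxPeers - activePeers, floored at 0
if wanted == 0 {
	return // at the peer limit; poll again next round
}
if flags.MaxDialIsActive() {
	wanted = min(wanted, flags.Get().MaxConcurrentDials) // Go 1.21+ builtin
}
// Pull up to `wanted` viable nodes, dial them concurrently, then wait so a
// single round never has more than `wanted` dials in flight.
nodes := enode.ReadNodes(iterator, wanted)
```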
@@ -317,7 +317,7 @@ func TestHostIsResolved(t *testing.T) {
 	// As defined in RFC 2606 , example.org is a
 	// reserved example domain name.
 	exampleHost := "example.org"
-	exampleIP := "93.184.216.34"
+	exampleIP := "93.184.215.14"

 	s := &Service{
 		cfg: &Config{

@@ -2,10 +2,14 @@ package p2p

 import (
 	"context"
+	"runtime"
+	"time"

 	"github.com/ethereum/go-ethereum/p2p/enode"
 )

+const backOffCounter = 50
+
 // filterNodes wraps an iterator such that Next only returns nodes for which
 // the 'check' function returns true. This custom implementation also
 // checks for context deadlines so that in the event the parent context has
@@ -24,13 +28,21 @@ type filterIter struct {
 // Next looks up for the next valid node according to our
 // filter criteria.
 func (f *filterIter) Next() bool {
+	lookupCounter := 0
 	for f.Iterator.Next() {
+		// Do not excessively perform lookups if we constantly receive non-viable peers.
+		if lookupCounter > backOffCounter {
+			lookupCounter = 0
+			runtime.Gosched()
+			time.Sleep(pollingPeriod)
+		}
 		if f.Context.Err() != nil {
 			return false
 		}
 		if f.check(f.Node()) {
 			return true
 		}
+		lookupCounter++
 	}
 	return false
 }

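`filterNodes` replaces `enode.Filter` so that a long run of non-viable candidates yields the scheduler and sleeps after `backOffCounter` rejections instead of spinning, and so a cancelled service context ends iteration promptly. An illustrative caller, assuming only the signature visible above:

```go
// Wrap discv5's random-node iterator; Next now only surfaces nodes that pass
// filterPeer and returns false once s.ctx is done.
iterator := filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
defer iterator.Close()
for iterator.Next() {
	candidate := iterator.Node()
	_ = candidate // dial or score the candidate here
}
```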
@@ -86,7 +86,7 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
 		return nil, errors.Wrapf(err, "cannot get ID from public key: %s", ifaceKey.GetPublic().Type().String())
 	}

-	log.WithField("peerId", id).Info("Running node with")
+	log.Infof("Running node with peer id of %s ", id.String())

 	options := []libp2p.Option{
 		privKeyOption(priKey),

@@ -87,7 +87,12 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
 		return false, errors.Errorf("unable to find requisite number of peers for topic %s - "+
 			"only %d out of %d peers were able to be found", topic, currNum, threshold)
 	}
-	nodes := enode.ReadNodes(iterator, int(params.BeaconNetworkConfig().MinimumPeersInSubnetSearch))
+	nodeCount := int(params.BeaconNetworkConfig().MinimumPeersInSubnetSearch)
+	// Restrict dials if limit is applied.
+	if flags.MaxDialIsActive() {
+		nodeCount = min(nodeCount, flags.Get().MaxConcurrentDials)
+	}
+	nodes := enode.ReadNodes(iterator, nodeCount)
 	for _, node := range nodes {
 		info, _, err := convertToAddrInfo(node)
 		if err != nil {

@@ -13,7 +13,7 @@ go_library(
     "//beacon-chain/blockchain:go_default_library",
     "//beacon-chain/builder:go_default_library",
     "//beacon-chain/cache:go_default_library",
-    "//beacon-chain/cache/depositcache:go_default_library",
+    "//beacon-chain/cache/depositsnapshot:go_default_library",
     "//beacon-chain/core/feed/block:go_default_library",
     "//beacon-chain/core/feed/operation:go_default_library",
     "//beacon-chain/core/feed/state:go_default_library",

@@ -369,16 +369,7 @@ func decodeIds(w http.ResponseWriter, st state.BeaconState, rawIds []string, ign
 func valsFromIds(w http.ResponseWriter, st state.BeaconState, ids []primitives.ValidatorIndex) ([]state.ReadOnlyValidator, bool) {
 	var vals []state.ReadOnlyValidator
 	if len(ids) == 0 {
-		allVals := st.Validators()
-		vals = make([]state.ReadOnlyValidator, len(allVals))
-		for i, val := range allVals {
-			readOnlyVal, err := statenative.NewValidator(val)
-			if err != nil {
-				httputil.HandleError(w, "Could not convert validator: "+err.Error(), http.StatusInternalServerError)
-				return nil, false
-			}
-			vals[i] = readOnlyVal
-		}
+		vals = st.ValidatorsReadOnly()
 	} else {
 		vals = make([]state.ReadOnlyValidator, 0, len(ids))
 		for _, id := range ids {

@@ -108,7 +108,7 @@ func (*Server) GetVersion(w http.ResponseWriter, r *http.Request) {

 // GetHealth returns node health status in http status codes. Useful for load balancers.
 func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
-	_, span := trace.StartSpan(r.Context(), "node.GetHealth")
+	ctx, span := trace.StartSpan(r.Context(), "node.GetHealth")
 	defer span.End()

 	rawSyncingStatus, syncingStatus, ok := shared.UintFromQuery(w, r, "syncing_status", false)
@@ -119,10 +119,14 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	if s.SyncChecker.Synced() {
+	optimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
+	if err != nil {
+		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
+	}
+	if s.SyncChecker.Synced() && !optimistic {
 		return
 	}
-	if s.SyncChecker.Syncing() || s.SyncChecker.Initialized() {
+	if s.SyncChecker.Syncing() || optimistic {
 		if rawSyncingStatus != "" {
 			w.WriteHeader(intSyncingStatus)
 		} else {
@@ -130,5 +134,6 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
 		}
 		return
 	}
+
 	w.WriteHeader(http.StatusServiceUnavailable)
 }

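With the change above, `/eth/v1/node/health` folds optimistic sync into its status codes: 200 only when synced and not optimistic, 206 (or the `syncing_status` override) while syncing or optimistic, and 503 otherwise. A minimal client-side check of those semantics; the host and port are assumptions, not part of the diff:

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	// Port 3500 is an assumption (a common Prysm HTTP gateway default).
	resp, err := http.Get("http://localhost:3500/eth/v1/node/health")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusOK: // synced and not optimistic: safe to serve traffic
	case http.StatusPartialContent: // syncing or optimistic: head not fully verified
	default: // 503: neither synced nor syncing
	}
}
```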
@@ -91,8 +91,10 @@ func TestGetVersion(t *testing.T) {

 func TestGetHealth(t *testing.T) {
 	checker := &syncmock.Sync{}
+	optimisticFetcher := &mock.ChainService{Optimistic: false}
 	s := &Server{
-		SyncChecker: checker,
+		SyncChecker:           checker,
+		OptimisticModeFetcher: optimisticFetcher,
 	}

 	request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
@@ -101,25 +103,30 @@ func TestGetHealth(t *testing.T) {
 	s.GetHealth(writer, request)
 	assert.Equal(t, http.StatusServiceUnavailable, writer.Code)

-	checker.IsInitialized = true
-	request = httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
-	writer = httptest.NewRecorder()
-	writer.Body = &bytes.Buffer{}
-	s.GetHealth(writer, request)
-	assert.Equal(t, http.StatusPartialContent, writer.Code)
-
 	checker.IsSyncing = true
+	checker.IsSynced = false
 	request = httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://example.com/eth/v1/node/health?syncing_status=%d", http.StatusPaymentRequired), nil)
 	writer = httptest.NewRecorder()
 	writer.Body = &bytes.Buffer{}
 	s.GetHealth(writer, request)
 	assert.Equal(t, http.StatusPaymentRequired, writer.Code)

 	checker.IsSyncing = false
+	checker.IsSynced = true
 	request = httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
 	writer = httptest.NewRecorder()
 	writer.Body = &bytes.Buffer{}
 	s.GetHealth(writer, request)
 	assert.Equal(t, http.StatusOK, writer.Code)
+
+	checker.IsSyncing = false
+	checker.IsSynced = true
+	optimisticFetcher.Optimistic = true
+	request = httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
+	writer = httptest.NewRecorder()
+	writer.Body = &bytes.Buffer{}
+	s.GetHealth(writer, request)
+	assert.Equal(t, http.StatusPartialContent, writer.Code)
 }

 func TestGetIdentity(t *testing.T) {

@@ -664,7 +664,7 @@ func (s *Server) GetAttesterDuties(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	committeeAssignments, _, err := helpers.CommitteeAssignments(ctx, st, requestedEpoch)
+	assignments, err := helpers.CommitteeAssignments(ctx, st, requestedEpoch, requestedValIndices)
 	if err != nil {
 		httputil.HandleError(w, "Could not compute committee assignments: "+err.Error(), http.StatusInternalServerError)
 		return
@@ -684,7 +684,7 @@ func (s *Server) GetAttesterDuties(w http.ResponseWriter, r *http.Request) {
 			httputil.HandleError(w, fmt.Sprintf("Invalid validator index %d", index), http.StatusBadRequest)
 			return
 		}
-		committee := committeeAssignments[index]
+		committee := assignments[index]
 		if committee == nil {
 			continue
 		}
@@ -708,10 +708,20 @@ func (s *Server) GetAttesterDuties(w http.ResponseWriter, r *http.Request) {
 		})
 	}

-	dependentRoot, err := attestationDependentRoot(st, requestedEpoch)
-	if err != nil {
-		httputil.HandleError(w, "Could not get dependent root: "+err.Error(), http.StatusInternalServerError)
-		return
+	var dependentRoot []byte
+	if requestedEpoch == 0 {
+		r, err := s.BeaconDB.GenesisBlockRoot(ctx)
+		if err != nil {
+			httputil.HandleError(w, "Could not get genesis block root: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
+		dependentRoot = r[:]
+	} else {
+		dependentRoot, err = attestationDependentRoot(st, requestedEpoch)
+		if err != nil {
+			httputil.HandleError(w, "Could not get dependent root: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
 	}
 	isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
 	if err != nil {
@@ -793,11 +803,11 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
 		}
 	}

-	var proposals map[primitives.ValidatorIndex][]primitives.Slot
+	var assignments map[primitives.ValidatorIndex][]primitives.Slot
 	if nextEpochLookahead {
-		_, proposals, err = helpers.CommitteeAssignments(ctx, st, nextEpoch)
+		assignments, err = helpers.ProposerAssignments(ctx, st, nextEpoch)
 	} else {
-		_, proposals, err = helpers.CommitteeAssignments(ctx, st, requestedEpoch)
+		assignments, err = helpers.ProposerAssignments(ctx, st, requestedEpoch)
 	}
 	if err != nil {
 		httputil.HandleError(w, "Could not compute committee assignments: "+err.Error(), http.StatusInternalServerError)
@@ -805,7 +815,7 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
 	}

 	duties := make([]*structs.ProposerDuty, 0)
-	for index, proposalSlots := range proposals {
+	for index, proposalSlots := range assignments {
 		val, err := st.ValidatorAtIndexReadOnly(index)
 		if err != nil {
 			httputil.HandleError(w, fmt.Sprintf("Could not get validator at index %d: %v", index, err), http.StatusInternalServerError)
@@ -822,10 +832,20 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
 		}
 	}

-	dependentRoot, err := proposalDependentRoot(st, requestedEpoch)
-	if err != nil {
-		httputil.HandleError(w, "Could not get dependent root: "+err.Error(), http.StatusInternalServerError)
-		return
+	var dependentRoot []byte
+	if requestedEpoch == 0 {
+		r, err := s.BeaconDB.GenesisBlockRoot(ctx)
+		if err != nil {
+			httputil.HandleError(w, "Could not get genesis block root: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
+		dependentRoot = r[:]
+	} else {
+		dependentRoot, err = proposalDependentRoot(st, requestedEpoch)
+		if err != nil {
+			httputil.HandleError(w, "Could not get dependent root: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
 	}
 	isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
 	if err != nil {

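Both duty endpoints now special-case epoch 0: there is no block before the genesis epoch from which to derive a dependent root, so the genesis block root is served instead. The rule, factored into a sketch (the helper name is hypothetical; only the calls inside it appear in the diff):

```go
// dutyDependentRoot captures the branch added to both GetAttesterDuties and
// GetProposerDuties above. For proposer duties, substitute
// proposalDependentRoot for attestationDependentRoot.
func dutyDependentRoot(ctx context.Context, s *Server, st state.BeaconState, epoch primitives.Epoch) ([]byte, error) {
	if epoch == 0 {
		// No pre-genesis block exists; anchor epoch-0 duties to the genesis root.
		r, err := s.BeaconDB.GenesisBlockRoot(ctx)
		if err != nil {
			return nil, err
		}
		return r[:], nil
	}
	return attestationDependentRoot(st, epoch)
}
```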
@@ -1443,6 +1443,9 @@ func TestGetAttesterDuties(t *testing.T) {
 	chain := &mockChain.ChainService{
 		State: bs, Root: genesisRoot[:], Slot: &chainSlot,
 	}
+	db := dbutil.SetupDB(t)
+	require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
+
 	s := &Server{
 		Stater: &testutil.MockStater{
 			StatesBySlot: map[primitives.Slot]state.BeaconState{
@@ -1453,6 +1456,7 @@ func TestGetAttesterDuties(t *testing.T) {
 		TimeFetcher:           chain,
 		SyncChecker:           &mockSync.Sync{IsSyncing: false},
 		OptimisticModeFetcher: chain,
+		BeaconDB:              db,
 	}

 	t.Run("single validator", func(t *testing.T) {
@@ -1619,7 +1623,6 @@ func TestGetAttesterDuties(t *testing.T) {
 		blk.Block.Slot = 31
 		root, err := blk.Block.HashTreeRoot()
 		require.NoError(t, err)
-		db := dbutil.SetupDB(t)
 		util.SaveBlock(t, ctx, db, blk)
 		require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))

@@ -1632,6 +1635,7 @@ func TestGetAttesterDuties(t *testing.T) {
 			TimeFetcher:           chain,
 			OptimisticModeFetcher: chain,
 			SyncChecker:           &mockSync.Sync{IsSyncing: false},
+			BeaconDB:              db,
 		}

 		var body bytes.Buffer
@@ -1693,6 +1697,9 @@ func TestGetProposerDuties(t *testing.T) {
 		pubKeys[i] = deposits[i].Data.PublicKey
 	}

+	db := dbutil.SetupDB(t)
+	require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
+
 	t.Run("ok", func(t *testing.T) {
 		bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data)
 		require.NoError(t, err, "Could not set up genesis state")
@@ -1710,6 +1717,7 @@ func TestGetProposerDuties(t *testing.T) {
 			SyncChecker:            &mockSync.Sync{IsSyncing: false},
 			PayloadIDCache:         cache.NewPayloadIDCache(),
 			TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
+			BeaconDB:               db,
 		}

 		request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
@@ -1737,6 +1745,7 @@ func TestGetProposerDuties(t *testing.T) {
 	t.Run("next epoch", func(t *testing.T) {
 		bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data)
 		require.NoError(t, err, "Could not set up genesis state")
+		require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch))
 		require.NoError(t, bs.SetBlockRoots(roots))
 		chainSlot := primitives.Slot(0)
 		chain := &mockChain.ChainService{
@@ -1750,6 +1759,7 @@ func TestGetProposerDuties(t *testing.T) {
 			SyncChecker:            &mockSync.Sync{IsSyncing: false},
 			PayloadIDCache:         cache.NewPayloadIDCache(),
 			TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
+			BeaconDB:               db,
 		}

 		request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
@@ -1792,6 +1802,7 @@ func TestGetProposerDuties(t *testing.T) {
 			SyncChecker:            &mockSync.Sync{IsSyncing: false},
 			PayloadIDCache:         cache.NewPayloadIDCache(),
 			TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
+			BeaconDB:               db,
 		}

 		currentEpoch := slots.ToEpoch(bs.Slot())
@@ -1808,7 +1819,6 @@ func TestGetProposerDuties(t *testing.T) {
 		assert.StringContains(t, fmt.Sprintf("Request epoch %d can not be greater than next epoch %d", currentEpoch+2, currentEpoch+1), e.Message)
 	})
 	t.Run("execution optimistic", func(t *testing.T) {
-		ctx := context.Background()
 		bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data)
 		require.NoError(t, err, "Could not set up genesis state")
 		// Set state to non-epoch start slot.
@@ -1818,11 +1828,6 @@ func TestGetProposerDuties(t *testing.T) {
 		blk := util.NewBeaconBlock()
 		blk.Block.ParentRoot = parentRoot[:]
 		blk.Block.Slot = 31
-		root, err := blk.Block.HashTreeRoot()
-		require.NoError(t, err)
-		db := dbutil.SetupDB(t)
-		util.SaveBlock(t, ctx, db, blk)
-		require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))

 		chainSlot := primitives.Slot(0)
 		chain := &mockChain.ChainService{
@@ -1836,6 +1841,7 @@ func TestGetProposerDuties(t *testing.T) {
 			SyncChecker:            &mockSync.Sync{IsSyncing: false},
 			PayloadIDCache:         cache.NewPayloadIDCache(),
 			TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
+			BeaconDB:               db,
 		}

 		request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)

@@ -223,7 +223,7 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
 		return nil, &core.RpcError{Err: errors.Wrap(err, "failed to retrieve block from db"), Reason: core.Internal}
 	}
 	// if block is not in the retention window return 200 w/ empty list
-	if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
+	if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
 		return make([]*blocks.VerifiedROBlob, 0), nil
 	}
 	commitments, err := b.Block().Body().BlobKzgCommitments()

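The availability check above now consults the node's own blob storage configuration rather than a protocol constant: `WithinRetentionPeriod` honors an operator-extended retention window, where `params.WithinDAPeriod` was pinned to the spec's minimum data-availability period. In sketch form, with names taken from the diff:

```go
// Serve blobs only while the node still retains them; outside the configured
// window the endpoint returns 200 with an empty list rather than an error.
blockEpoch := slots.ToEpoch(b.Block().Slot())
currentEpoch := slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())
if !p.BlobStorage.WithinRetentionPeriod(blockEpoch, currentEpoch) {
	return make([]*blocks.VerifiedROBlob, 0), nil
}
```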
@@ -164,8 +164,7 @@ func TestGetBlob(t *testing.T) {
 	db := testDB.SetupDB(t)
 	denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
 	require.NoError(t, db.SaveBlock(context.Background(), denebBlock))
-	_, bs, err := filesystem.NewEphemeralBlobStorageWithFs(t)
-	require.NoError(t, err)
+	_, bs := filesystem.NewEphemeralBlobStorageWithFs(t)
 	testSidecars, err := verification.BlobSidecarSliceNoop(blobs)
 	require.NoError(t, err)
 	for i := range testSidecars {

@@ -105,23 +105,28 @@ func (bs *Server) ListValidatorAssignments(
 	}

 	// Initialize all committee related data.
-	committeeAssignments, proposerIndexToSlots, err := helpers.CommitteeAssignments(ctx, requestedState, requestedEpoch)
+	assignments, err := helpers.CommitteeAssignments(ctx, requestedState, requestedEpoch, filteredIndices[start:end])
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
 	}

+	proposalSlots, err := helpers.ProposerAssignments(ctx, requestedState, requestedEpoch)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "Could not compute proposer slots: %v", err)
+	}
+
 	for _, index := range filteredIndices[start:end] {
 		if uint64(index) >= uint64(requestedState.NumValidators()) {
 			return nil, status.Errorf(codes.OutOfRange, "Validator index %d >= validator count %d",
 				index, requestedState.NumValidators())
 		}
-		comAssignment := committeeAssignments[index]
+		a := assignments[index]
 		pubkey := requestedState.PubkeyAtIndex(index)
 		assign := &ethpb.ValidatorAssignments_CommitteeAssignment{
-			BeaconCommittees: comAssignment.Committee,
-			CommitteeIndex:   comAssignment.CommitteeIndex,
-			AttesterSlot:     comAssignment.AttesterSlot,
-			ProposerSlots:    proposerIndexToSlots[index],
+			BeaconCommittees: a.Committee,
+			CommitteeIndex:   a.CommitteeIndex,
+			AttesterSlot:     a.AttesterSlot,
+			ProposerSlots:    proposalSlots[index],
 			PublicKey:        pubkey[:],
 			ValidatorIndex:   index,
 		}

|
||||
|
||||
activeIndices, err := helpers.ActiveValidatorIndices(ctx, s, 0)
|
||||
require.NoError(t, err)
|
||||
committeeAssignments, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), s, 0)
|
||||
assignments, err := helpers.CommitteeAssignments(context.Background(), s, 0, activeIndices[0:params.BeaconConfig().DefaultPageSize])
|
||||
require.NoError(t, err)
|
||||
proposerSlots, err := helpers.ProposerAssignments(ctx, s, 0)
|
||||
require.NoError(t, err)
|
||||
for _, index := range activeIndices[0:params.BeaconConfig().DefaultPageSize] {
|
||||
val, err := s.ValidatorAtIndex(index)
|
||||
require.NoError(t, err)
|
||||
wanted = append(wanted, ðpb.ValidatorAssignments_CommitteeAssignment{
|
||||
BeaconCommittees: committeeAssignments[index].Committee,
|
||||
CommitteeIndex: committeeAssignments[index].CommitteeIndex,
|
||||
AttesterSlot: committeeAssignments[index].AttesterSlot,
|
||||
ProposerSlots: proposerIndexToSlots[index],
|
||||
BeaconCommittees: assignments[index].Committee,
|
||||
CommitteeIndex: assignments[index].CommitteeIndex,
|
||||
AttesterSlot: assignments[index].AttesterSlot,
|
||||
ProposerSlots: proposerSlots[index],
|
||||
PublicKey: val.PublicKey,
|
||||
ValidatorIndex: index,
|
||||
})
|
||||
@@ -244,16 +246,18 @@ func TestServer_ListAssignments_FilterPubkeysIndices_NoPagination(t *testing.T)
|
||||
|
||||
activeIndices, err := helpers.ActiveValidatorIndices(ctx, s, 0)
|
||||
require.NoError(t, err)
|
||||
committeeAssignments, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), s, 0)
|
||||
assignments, err := helpers.CommitteeAssignments(context.Background(), s, 0, activeIndices[1:4])
|
||||
require.NoError(t, err)
|
||||
proposerSlots, err := helpers.ProposerAssignments(ctx, s, 0)
|
||||
require.NoError(t, err)
|
||||
for _, index := range activeIndices[1:4] {
|
||||
val, err := s.ValidatorAtIndex(index)
|
||||
require.NoError(t, err)
|
||||
wanted = append(wanted, ðpb.ValidatorAssignments_CommitteeAssignment{
|
||||
BeaconCommittees: committeeAssignments[index].Committee,
|
||||
CommitteeIndex: committeeAssignments[index].CommitteeIndex,
|
||||
AttesterSlot: committeeAssignments[index].AttesterSlot,
|
||||
ProposerSlots: proposerIndexToSlots[index],
|
||||
BeaconCommittees: assignments[index].Committee,
|
||||
CommitteeIndex: assignments[index].CommitteeIndex,
|
||||
AttesterSlot: assignments[index].AttesterSlot,
|
||||
ProposerSlots: proposerSlots[index],
|
||||
PublicKey: val.PublicKey,
|
||||
ValidatorIndex: index,
|
||||
})
|
||||
@@ -312,16 +316,18 @@ func TestServer_ListAssignments_CanFilterPubkeysIndices_WithPagination(t *testin
|
||||
|
||||
activeIndices, err := helpers.ActiveValidatorIndices(ctx, s, 0)
|
||||
require.NoError(t, err)
|
||||
committeeAssignments, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), s, 0)
|
||||
as, err := helpers.CommitteeAssignments(context.Background(), s, 0, activeIndices[3:5])
|
||||
require.NoError(t, err)
|
||||
proposalSlots, err := helpers.ProposerAssignments(ctx, s, 0)
|
||||
require.NoError(t, err)
|
||||
for _, index := range activeIndices[3:5] {
|
||||
val, err := s.ValidatorAtIndex(index)
|
||||
require.NoError(t, err)
|
||||
assignments = append(assignments, ðpb.ValidatorAssignments_CommitteeAssignment{
|
||||
BeaconCommittees: committeeAssignments[index].Committee,
|
||||
CommitteeIndex: committeeAssignments[index].CommitteeIndex,
|
||||
AttesterSlot: committeeAssignments[index].AttesterSlot,
|
||||
ProposerSlots: proposerIndexToSlots[index],
|
||||
BeaconCommittees: as[index].Committee,
|
||||
CommitteeIndex: as[index].CommitteeIndex,
|
||||
AttesterSlot: as[index].AttesterSlot,
|
||||
ProposerSlots: proposalSlots[index],
|
||||
PublicKey: val.PublicKey,
|
||||
ValidatorIndex: index,
|
||||
})
|
||||
@@ -340,16 +346,18 @@ func TestServer_ListAssignments_CanFilterPubkeysIndices_WithPagination(t *testin
|
||||
req = ðpb.ListValidatorAssignmentsRequest{Indices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}, PageSize: 5, PageToken: "1"}
|
||||
res, err = bs.ListValidatorAssignments(context.Background(), req)
|
||||
require.NoError(t, err)
|
||||
cAssignments, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), s, 0)
|
||||
as, err = helpers.CommitteeAssignments(context.Background(), s, 0, activeIndices[6:7])
|
||||
require.NoError(t, err)
|
||||
proposalSlots, err = helpers.ProposerAssignments(ctx, s, 0)
|
||||
require.NoError(t, err)
|
||||
for _, index := range activeIndices[6:7] {
|
||||
val, err := s.ValidatorAtIndex(index)
|
||||
require.NoError(t, err)
|
||||
assignments = append(assignments, ðpb.ValidatorAssignments_CommitteeAssignment{
|
||||
BeaconCommittees: cAssignments[index].Committee,
|
||||
CommitteeIndex: cAssignments[index].CommitteeIndex,
|
||||
AttesterSlot: cAssignments[index].AttesterSlot,
|
||||
ProposerSlots: proposerIndexToSlots[index],
|
||||
BeaconCommittees: as[index].Committee,
|
||||
CommitteeIndex: as[index].CommitteeIndex,
|
||||
AttesterSlot: as[index].AttesterSlot,
|
||||
ProposerSlots: proposalSlots[index],
|
||||
PublicKey: val.PublicKey,
|
||||
ValidatorIndex: index,
|
||||
})
|
||||
|
||||
@@ -37,7 +37,7 @@ go_library(
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
@@ -112,7 +112,7 @@ common_deps = [
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/builder/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/execution:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
@@ -52,16 +53,31 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
|
||||
return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", epochStartSlot, err)
|
||||
}
|
||||
}
|
||||
committeeAssignments, proposerIndexToSlots, err := helpers.CommitteeAssignments(ctx, s, req.Epoch)
|
||||
|
||||
requestIndices := make([]primitives.ValidatorIndex, 0, len(req.PublicKeys))
|
||||
for _, pubKey := range req.PublicKeys {
|
||||
idx, ok := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
requestIndices = append(requestIndices, idx)
|
||||
}
|
||||
|
||||
assignments, err := helpers.CommitteeAssignments(ctx, s, req.Epoch, requestIndices)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
|
||||
}
|
||||
// Query the next epoch assignments for committee subnet subscriptions.
|
||||
nextCommitteeAssignments, _, err := helpers.CommitteeAssignments(ctx, s, req.Epoch+1)
|
||||
nextEpochAssignments, err := helpers.CommitteeAssignments(ctx, s, req.Epoch+1, requestIndices)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not compute next committee assignments: %v", err)
|
||||
}
|
||||
|
||||
proposalSlots, err := helpers.ProposerAssignments(ctx, s, req.Epoch)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not compute proposer slots: %v", err)
|
||||
}
|
||||
|
||||
validatorAssignments := make([]*ethpb.DutiesResponse_Duty, 0, len(req.PublicKeys))
|
||||
nextValidatorAssignments := make([]*ethpb.DutiesResponse_Duty, 0, len(req.PublicKeys))
|
||||
|
||||
@@ -81,20 +97,20 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
|
||||
|
||||
assignment.ValidatorIndex = idx
|
||||
assignment.Status = s
|
||||
assignment.ProposerSlots = proposerIndexToSlots[idx]
|
||||
assignment.ProposerSlots = proposalSlots[idx]
|
||||
|
||||
// The next epoch has no lookup for proposer indexes.
|
||||
nextAssignment.ValidatorIndex = idx
|
||||
nextAssignment.Status = s
|
||||
|
||||
ca, ok := committeeAssignments[idx]
|
||||
ca, ok := assignments[idx]
|
||||
if ok {
|
||||
assignment.Committee = ca.Committee
|
||||
assignment.AttesterSlot = ca.AttesterSlot
|
||||
assignment.CommitteeIndex = ca.CommitteeIndex
|
||||
}
|
||||
// Save the next epoch assignments.
|
||||
ca, ok = nextCommitteeAssignments[idx]
|
||||
ca, ok = nextEpochAssignments[idx]
|
||||
if ok {
|
||||
nextAssignment.Committee = ca.Committee
|
||||
nextAssignment.AttesterSlot = ca.AttesterSlot
|
||||
@@ -123,9 +139,9 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
|
||||
// Next epoch sync committee duty is assigned with next period sync committee only during
|
||||
// sync period epoch boundary (ie. EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1). Else wise
|
||||
// next epoch sync committee duty is the same as current epoch.
|
||||
nextSlotToEpoch := slots.ToEpoch(s.Slot() + 1)
|
||||
nextEpoch := req.Epoch + 1
|
||||
currentEpoch := coreTime.CurrentEpoch(s)
|
||||
if slots.SyncCommitteePeriod(nextSlotToEpoch) == slots.SyncCommitteePeriod(currentEpoch)+1 {
|
||||
if slots.SyncCommitteePeriod(nextEpoch) > slots.SyncCommitteePeriod(currentEpoch) {
|
||||
nextAssignment.IsSyncCommittee, err = helpers.IsNextPeriodSyncCommittee(s, idx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine next epoch sync committee: %v", err)
|
||||
|
||||
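The boundary test above switches from an equality on the state's next slot to a plain period comparison on `req.Epoch + 1`, which also behaves correctly when the state lags the requested epoch. A worked example, assuming mainnet's EPOCHS_PER_SYNC_COMMITTEE_PERIOD of 256:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

func main() {
	current := primitives.Epoch(255) // last epoch of sync committee period 0
	next := current + 1              // epoch 256, first epoch of period 1
	// Prints true only at the period boundary, which is exactly when the
	// next-period sync committee must be consulted.
	fmt.Println(slots.SyncCommitteePeriod(next) > slots.SyncCommitteePeriod(current))
}
```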
@@ -8,7 +8,7 @@ import (

 	mockChain "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/execution"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
@@ -334,7 +334,7 @@ func TestGetAltairDuties_UnknownPubkey(t *testing.T) {
 	chain := &mockChain.ChainService{
 		State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
 	}
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	vs := &Server{

@@ -30,6 +30,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
 	"github.com/sirupsen/logrus"
 	"go.opencensus.io/trace"
+	"golang.org/x/sync/errgroup"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -362,25 +363,32 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si

 // broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
 func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
+	eg, eCtx := errgroup.WithContext(ctx)
 	for i, sc := range sidecars {
-		if err := vs.P2P.BroadcastBlob(ctx, uint64(i), sc); err != nil {
-			return errors.Wrap(err, "broadcast blob failed")
-		}
-
-		readOnlySc, err := blocks.NewROBlobWithRoot(sc, root)
-		if err != nil {
-			return errors.Wrap(err, "ROBlob creation failed")
-		}
-		verifiedBlob := blocks.NewVerifiedROBlob(readOnlySc)
-		if err := vs.BlobReceiver.ReceiveBlob(ctx, verifiedBlob); err != nil {
-			return errors.Wrap(err, "receive blob failed")
-		}
-		vs.OperationNotifier.OperationFeed().Send(&feed.Event{
-			Type: operation.BlobSidecarReceived,
-			Data: &operation.BlobSidecarReceivedData{Blob: &verifiedBlob},
+		// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
+		// See https://golang.org/doc/faq#closures_and_goroutines for more details.
+		subIdx := i
+		sCar := sc
+		eg.Go(func() error {
+			if err := vs.P2P.BroadcastBlob(eCtx, uint64(subIdx), sCar); err != nil {
+				return errors.Wrap(err, "broadcast blob failed")
+			}
+			readOnlySc, err := blocks.NewROBlobWithRoot(sCar, root)
+			if err != nil {
+				return errors.Wrap(err, "ROBlob creation failed")
+			}
+			verifiedBlob := blocks.NewVerifiedROBlob(readOnlySc)
+			if err := vs.BlobReceiver.ReceiveBlob(ctx, verifiedBlob); err != nil {
+				return errors.Wrap(err, "receive blob failed")
+			}
+			vs.OperationNotifier.OperationFeed().Send(&feed.Event{
+				Type: operation.BlobSidecarReceived,
+				Data: &operation.BlobSidecarReceivedData{Blob: &verifiedBlob},
+			})
+			return nil
 		})
 	}
-	return nil
+	return eg.Wait()
 }

 // PrepareBeaconProposer caches and updates the fee recipient for the given proposer.

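Blob sidecar fan-out now runs one goroutine per sidecar under an `errgroup`, so broadcasts and receipts proceed concurrently and the first failure cancels the group's context and is surfaced by `eg.Wait()`. The skeleton of the pattern, with the per-sidecar work folded into a hypothetical `handle` helper:

```go
eg, eCtx := errgroup.WithContext(ctx)
for i, sc := range sidecars {
	i, sc := i, sc // pre-Go 1.22: give each goroutine its own copies
	eg.Go(func() error {
		return handle(eCtx, uint64(i), sc) // broadcast + receive one sidecar
	})
}
return eg.Wait() // first non-nil error, or nil once every goroutine finishes
```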
@@ -14,7 +14,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
 	builderTest "github.com/prysmaticlabs/prysm/v5/beacon-chain/builder/testing"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
 	b "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
@@ -1000,7 +1000,7 @@ func TestProposer_PendingDeposits_OutsideEth1FollowWindow(t *testing.T) {
 		},
 	}

-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
@@ -1139,7 +1139,7 @@ func TestProposer_PendingDeposits_FollowsCorrectEth1Block(t *testing.T) {
 		},
 	}

-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
@@ -1244,7 +1244,7 @@ func TestProposer_PendingDeposits_CantReturnBelowStateEth1DepositIndex(t *testin
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")

-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	for _, dp := range append(readyDeposits, recentDeposits...) {
@@ -1344,7 +1344,7 @@ func TestProposer_PendingDeposits_CantReturnMoreThanMax(t *testing.T) {
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")

-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	for _, dp := range append(readyDeposits, recentDeposits...) {
@@ -1442,7 +1442,7 @@ func TestProposer_PendingDeposits_CantReturnMoreThanDepositCount(t *testing.T) {
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")

-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	for _, dp := range append(readyDeposits, recentDeposits...) {
@@ -1553,7 +1553,7 @@ func TestProposer_DepositTrie_UtilizesCachedFinalizedDeposits(t *testing.T) {
 		},
 	}

-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
@@ -1669,7 +1669,7 @@ func TestProposer_DepositTrie_RebuildTrie(t *testing.T) {
 		},
 	}

-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
@@ -1810,7 +1810,7 @@ func TestProposer_Eth1Data_MajorityVote_SpansGenesis(t *testing.T) {
 		InsertBlock(100, latestValidTime, []byte("latest"))

 	headBlockHash := []byte("headb")
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)
 	ps := &Server{
 		ChainStartFetcher: p,
@@ -1851,7 +1851,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
 	}
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err)
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)
 	root, err := depositTrie.HashTreeRoot()
 	require.NoError(t, err)
@@ -2351,7 +2351,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
 		BlockHash: []byte("eth1data"),
 	}

-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -2547,7 +2547,7 @@ func TestProposer_Deposits_ReturnsEmptyList_IfLatestEth1DataEqGenesisEth1Block(t
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")

-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	for _, dp := range append(readyDeposits, recentDeposits...) {

@@ -10,7 +10,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
 	blockfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/block"
 	opfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
 	statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
@@ -69,7 +69,7 @@ type Server struct {
 	BlobReceiver           blockchain.BlobReceiver
 	MockEth1Votes          bool
 	Eth1BlockFetcher       execution.POWBlockFetcher
-	PendingDepositsFetcher depositcache.PendingDepositsFetcher
+	PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
 	OperationNotifier      opfeed.Notifier
 	StateGen               stategen.StateManager
 	ReplayerBuilder        stategen.ReplayerBuilder
@@ -99,10 +99,15 @@ func (vs *Server) WaitForActivation(req *ethpb.ValidatorActivationRequest, strea
 		return status.Errorf(codes.Internal, "Could not send response over stream: %v", err)
 	}

+	waitTime := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
+	timer := time.NewTimer(waitTime)
+	defer timer.Stop()
+
 	for {
+		timer.Reset(waitTime)
 		select {
 		// Pinging every slot for activation.
-		case <-time.After(time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second):
+		case <-timer.C:
 			activeValidatorExists, validatorStatuses, err := vs.activationStatus(stream.Context(), req.PublicKeys)
 			if err != nil {
 				return status.Errorf(codes.Internal, "Could not fetch validator status: %v", err)

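The `WaitForActivation` change swaps `time.After` for a single reusable timer: `time.After` allocates a fresh timer on every loop iteration that cannot be collected until it fires, while one `time.NewTimer` plus `Reset` keeps the per-slot polling allocation-free. The shape of the pattern, as a sketch:

```go
waitTime := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
timer := time.NewTimer(waitTime)
defer timer.Stop()
for {
	timer.Reset(waitTime) // safe here: the previous tick was consumed below
	select {
	case <-timer.C:
		// poll activation status once per slot
	case <-stream.Context().Done():
		return stream.Context().Err()
	}
}
```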
@@ -5,7 +5,7 @@ import (
|
||||
"testing"
|
||||
|
||||
mockChain "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
@@ -65,7 +65,7 @@ func TestWaitForActivation_ValidatorOriginallyExists(t *testing.T) {
|
||||
}
|
||||
depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not setup deposit trie")
|
||||
depositCache, err := depositcache.New()
|
||||
depositCache, err := depositsnapshot.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
root, err := depositTrie.HashTreeRoot()
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/async/event"
|
||||
mockChain "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
@@ -70,7 +70,7 @@ func TestWaitForActivation_ContextClosed(t *testing.T) {
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
depositCache, err := depositcache.New()
|
||||
depositCache, err := depositsnapshot.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
vs := &Server{
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
mockChain "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
@@ -40,7 +40,7 @@ func TestValidatorStatus_Active(t *testing.T) {
|
||||
}
|
||||
depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not setup deposit trie")
|
||||
depositCache, err := depositcache.New()
|
||||
depositCache, err := depositsnapshot.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
root, err := depositTrie.HashTreeRoot()
|
||||
|
||||
@@ -8,7 +8,7 @@ import (

 	"github.com/d4l3k/messagediff"
 	mockChain "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
 	mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
@@ -38,7 +38,7 @@ func TestValidatorStatus_DepositedEth1(t *testing.T) {
 	pubKey1 := deposit.Data.PublicKey
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	root, err := depositTrie.HashTreeRoot()
@@ -80,7 +80,7 @@ func TestValidatorStatus_Deposited(t *testing.T) {
 	}
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	root, err := depositTrie.HashTreeRoot()
@@ -125,7 +125,7 @@ func TestValidatorStatus_PartiallyDeposited(t *testing.T) {
 	}
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	root, err := depositTrie.HashTreeRoot()
@@ -178,7 +178,7 @@ func TestValidatorStatus_Pending_MultipleDeposits(t *testing.T) {
 	}
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	root, err := depositTrie.HashTreeRoot()
@@ -255,7 +255,7 @@ func TestValidatorStatus_Pending(t *testing.T) {
 	}
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	root, err := depositTrie.HashTreeRoot()
@@ -318,7 +318,7 @@ func TestValidatorStatus_Exiting(t *testing.T) {
 	}
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	root, err := depositTrie.HashTreeRoot()
@@ -377,7 +377,7 @@ func TestValidatorStatus_Slashing(t *testing.T) {
 	}
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	root, err := depositTrie.HashTreeRoot()
@@ -435,7 +435,7 @@ func TestValidatorStatus_Exited(t *testing.T) {
 	}
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	root, err := depositTrie.HashTreeRoot()
@@ -464,7 +464,7 @@ func TestValidatorStatus_Exited(t *testing.T) {

 func TestValidatorStatus_UnknownStatus(t *testing.T) {
 	pubKey := pubKey(1)
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	stateObj, err := state_native.InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{
@@ -520,7 +520,7 @@ func TestActivationStatus_OK(t *testing.T) {
 	dep := deposits[0]
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	root, err := depositTrie.HashTreeRoot()
@@ -659,7 +659,7 @@ func TestValidatorStatus_CorrectActivationQueue(t *testing.T) {

 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	for i := 0; i < 6; i++ {
@@ -751,7 +751,7 @@ func TestMultipleValidatorStatus_Pubkeys(t *testing.T) {
 	require.NoError(t, err, "Could not get signing root")
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	dep := deposits[0]
@@ -916,7 +916,7 @@ func TestValidatorStatus_Invalid(t *testing.T) {
 	deposit.Data.Signature = deposit.Data.Signature[1:]
 	depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 	require.NoError(t, err, "Could not setup deposit trie")
-	depositCache, err := depositcache.New()
+	depositCache, err := depositsnapshot.New()
 	require.NoError(t, err)

 	root, err := depositTrie.HashTreeRoot()
@@ -20,7 +20,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
 	blockfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/block"
 	opfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
 	statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
@@ -127,7 +127,7 @@ type Config struct {
 	PeerManager p2p.PeerManager
 	MetadataProvider p2p.MetadataProvider
 	DepositFetcher cache.DepositFetcher
-	PendingDepositFetcher depositcache.PendingDepositsFetcher
+	PendingDepositFetcher depositsnapshot.PendingDepositsFetcher
 	StateNotifier statefeed.Notifier
 	BlockNotifier blockfeed.Notifier
 	OperationNotifier opfeed.Notifier
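
The `depositcache` → `depositsnapshot` swap repeated throughout these hunks is a one-line constructor change because both packages satisfy the same consumer-facing cache interfaces (note `DepositFetcher cache.DepositFetcher` above, which is untouched). A minimal sketch of the pattern; the interface and method names below are illustrative assumptions, not Prysm API:

```go
// Sketch only: two cache implementations behind one consumer-facing
// interface, so swapping them touches only the constructor call site.
package main

import "fmt"

// DepositInserter and PendingCount are hypothetical names for illustration.
type DepositInserter interface {
	PendingCount() int
}

type legacyDepositCache struct{ pending int }

func (c *legacyDepositCache) PendingCount() int { return c.pending }

type snapshotDepositCache struct{ pending int }

func (c *snapshotDepositCache) PendingCount() int { return c.pending }

func main() {
	// The only change per call site, mirroring the diff:
	// var cache DepositInserter = &legacyDepositCache{}
	var cache DepositInserter = &snapshotDepositCache{}
	fmt.Println(cache.PendingCount())
}
```
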
@@ -118,6 +118,7 @@ type ReadOnlyValidator interface {
 // ReadOnlyValidators defines a struct which only has read access to validators methods.
 type ReadOnlyValidators interface {
 	Validators() []*ethpb.Validator
+	ValidatorsReadOnly() []ReadOnlyValidator
 	ValidatorAtIndex(idx primitives.ValidatorIndex) (*ethpb.Validator, error)
 	ValidatorAtIndexReadOnly(idx primitives.ValidatorIndex) (ReadOnlyValidator, error)
 	ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool)
24
beacon-chain/state/state-native/README.md
Normal file
@@ -0,0 +1,24 @@
+## Adding a new field to the state
+
+Note: Whenever only the name of a file is provided, it's assumed to be in the `/beacon-chain/state/state-native` package.
+
+- Add a `BeaconState[Version]FieldCount` configuration item to `/config/params/config.go` and set it in `/config/params/mainnet_config.go`.
+- Add the field to the `BeaconState` struct in `beacon_state_mainnet.go` and `beacon_state_minimal.go`. Update the marshaling code too.
+- Add the field's metadata to `/beacon-chain/state/state-native/types/types.go`.
+- Add a getter and a setter for the field, either to existing `getter_XXX.go`/`setter_XXX.go` files or create new ones if the field doesn't fit anywhere.
+  Add the new getter and setter to `/beacon-chain/state/interfaces.go`.
+- Update state hashing in `hasher.go`.
+- Update `ToProtoUnsafe()` and `ToProto()` functions and add a new `ProtobufBeaconState[Version]` function, all in `getters_state.go`.
+- If the field is a multi-value slice, update `multi_value_slices.go`.
+- Update `spec_parameters.go`.
+- Update `state_trie.go`:
+  - Add a `[version]Fields` variable that contains all fields of the new state version.
+  - Add a `[version]SharedFieldRefCount` constant that represents the number of fields whose references are shared between states.
+  - Add an `experimentalState[Version]SharedFieldCountRef` constant that represents the number of **non multi-value slice** fields whose references are shared
+    between states.
+  - Add the following functions: `InitializeFromProto[Version]()`, `InitializeFromProtoUnsafe[Version]()`.
+  - Update the following functions: `Copy()`, `initializeMerkleLayers()`, `RecordStateMetrics()` (applies only to multi-value slice fields), `rootSelector()`,
+    `finalizerCleanup()` (applies only to multi-value slice fields).
+- If the field is a slice, add it to the field map in `types.go`.
+- If the field is a slice, update the `fieldConverters()` function in `/beacon-chain/state/fieldtrie/field_trie_helpers.go`. The exact implementation will vary
+  depending on a few factors (is the field similar to an existing one, is it a multi-value slice etc.)
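
For the getter/setter step in the new README above, a minimal sketch for a hypothetical scalar field `fooGwei` follows. The lock discipline matches the accessors in the next hunk; the field name and the elided dirty-marking hook are assumptions, not Prysm API:

```go
// Sketch of the getter/setter step for a hypothetical new field. The
// fooGwei name is invented; real code would also mark the field dirty
// for incremental hashing, which is elided here.
package statenative

import "sync"

type BeaconState struct {
	lock    sync.RWMutex
	fooGwei uint64
}

// FooGwei reads the hypothetical field under a read lock.
func (b *BeaconState) FooGwei() uint64 {
	b.lock.RLock()
	defer b.lock.RUnlock()
	return b.fooGwei
}

// SetFooGwei writes the hypothetical field under a write lock.
func (b *BeaconState) SetFooGwei(v uint64) {
	b.lock.Lock()
	defer b.lock.Unlock()
	b.fooGwei = v
}
```
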
@@ -21,6 +21,14 @@ func (b *BeaconState) Validators() []*ethpb.Validator {
 	return b.validatorsVal()
 }

+// ValidatorsReadOnly returns read-only references to the validators participating in consensus on the beacon chain.
+func (b *BeaconState) ValidatorsReadOnly() []state.ReadOnlyValidator {
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	return b.validatorsReadOnlyVal()
+}
+
 func (b *BeaconState) validatorsVal() []*ethpb.Validator {
 	var v []*ethpb.Validator
 	if features.Get().EnableExperimentalState {
@@ -46,6 +54,35 @@ func (b *BeaconState) validatorsVal() []*ethpb.Validator {
 	return res
 }

+func (b *BeaconState) validatorsReadOnlyVal() []state.ReadOnlyValidator {
+	var v []*ethpb.Validator
+	if features.Get().EnableExperimentalState {
+		if b.validatorsMultiValue == nil {
+			return nil
+		}
+		v = b.validatorsMultiValue.Value(b)
+	} else {
+		if b.validators == nil {
+			return nil
+		}
+		v = b.validators
+	}
+
+	res := make([]state.ReadOnlyValidator, len(v))
+	var err error
+	for i := 0; i < len(res); i++ {
+		val := v[i]
+		if val == nil {
+			continue
+		}
+		res[i], err = NewValidator(val)
+		if err != nil {
+			continue
+		}
+	}
+	return res
+}
+
 // references of validators participating in consensus on the beacon chain.
 // This assumes that a lock is already held on BeaconState. This does not
 // copy fully and instead just copies the reference.
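
The point of `ValidatorsReadOnly` is to hand callers cheap wrappers instead of deep-copied protobuf objects. A sketch of a consumer, assuming the `state.ReadOnlyValidator` interface exposes `Slashed()` and `EffectiveBalance()` accessors:

```go
// Sketch of a consumer of the new accessor; it must tolerate nil entries,
// since validatorsReadOnlyVal above skips validators it cannot wrap.
package example

import "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"

func totalUnslashedBalance(vals []state.ReadOnlyValidator) uint64 {
	var total uint64
	for _, v := range vals {
		if v == nil || v.Slashed() {
			continue
		}
		total += v.EffectiveBalance()
	}
	return total
}
```
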
@@ -20,34 +20,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/testing/require"
 )

-func TestValidatorMap_DistinctCopy(t *testing.T) {
-	count := uint64(100)
-	vals := make([]*ethpb.Validator, 0, count)
-	for i := uint64(1); i < count; i++ {
-		var someRoot [32]byte
-		var someKey [fieldparams.BLSPubkeyLength]byte
-		copy(someRoot[:], strconv.Itoa(int(i)))
-		copy(someKey[:], strconv.Itoa(int(i)))
-		vals = append(vals, &ethpb.Validator{
-			PublicKey: someKey[:],
-			WithdrawalCredentials: someRoot[:],
-			EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
-			Slashed: false,
-			ActivationEligibilityEpoch: 1,
-			ActivationEpoch: 1,
-			ExitEpoch: 1,
-			WithdrawableEpoch: 1,
-		})
-	}
-	handler := stateutil.NewValMapHandler(vals)
-	newHandler := handler.Copy()
-	wantedPubkey := strconv.Itoa(22)
-	handler.Set(bytesutil.ToBytes48([]byte(wantedPubkey)), 27)
-	val1, _ := handler.Get(bytesutil.ToBytes48([]byte(wantedPubkey)))
-	val2, _ := newHandler.Get(bytesutil.ToBytes48([]byte(wantedPubkey)))
-	assert.NotEqual(t, val1, val2, "Values are supposed to be unequal due to copy")
-}
-
 func TestBeaconState_NoDeadlock_Phase0(t *testing.T) {
 	count := uint64(100)
 	vals := make([]*ethpb.Validator, 0, count)
@@ -36,24 +36,6 @@ func (v *ValidatorMapHandler) IsNil() bool {
 	return v.mapRef == nil || v.valIdxMap == nil
 }

-// Copy the whole map and returns a map handler with the copied map.
-func (v *ValidatorMapHandler) Copy() *ValidatorMapHandler {
-	if v == nil || v.valIdxMap == nil {
-		return &ValidatorMapHandler{valIdxMap: map[[fieldparams.BLSPubkeyLength]byte]primitives.ValidatorIndex{}, mapRef: new(Reference), RWMutex: new(sync.RWMutex)}
-	}
-	v.RLock()
-	defer v.RUnlock()
-	m := make(map[[fieldparams.BLSPubkeyLength]byte]primitives.ValidatorIndex, len(v.valIdxMap))
-	for k, v := range v.valIdxMap {
-		m[k] = v
-	}
-	return &ValidatorMapHandler{
-		valIdxMap: m,
-		mapRef: &Reference{refs: 1},
-		RWMutex: new(sync.RWMutex),
-	}
-}
-
 // Get the validator index using the corresponding public key.
 func (v *ValidatorMapHandler) Get(key [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
 	v.RLock()
@@ -1,6 +1,7 @@
 package backfill

 import (
+	"context"
 	"fmt"
 	"sort"
 	"time"
@@ -55,6 +56,8 @@
 	batchEndSequence
 )

+var retryDelay = time.Second
+
 type batchId string

 type batch struct {
@@ -62,6 +65,7 @@ type batch struct {
 	scheduled time.Time
 	seq int // sequence identifier, ie how many times has the sequence() method served this batch
 	retries int
+	retryAfter time.Time
 	begin primitives.Slot
 	end primitives.Slot // half-open interval, [begin, end), ie >= start, < end.
 	results verifiedROBlocks
@@ -74,7 +78,7 @@
 }

 func (b batch) logFields() logrus.Fields {
-	return map[string]interface{}{
+	f := map[string]interface{}{
 		"batchId": b.id(),
 		"state": b.state.String(),
 		"scheduled": b.scheduled.String(),
@@ -86,6 +90,10 @@ func (b batch) logFields() logrus.Fields {
 		"blockPid": b.blockPid,
 		"blobPid": b.blobPid,
 	}
+	if b.retries > 0 {
+		f["retryAfter"] = b.retryAfter.String()
+	}
+	return f
 }

 func (b batch) replaces(r batch) bool {
@@ -153,7 +161,8 @@ func (b batch) withState(s batchState) batch {
 	switch b.state {
 	case batchErrRetryable:
 		b.retries += 1
-		log.WithFields(b.logFields()).Info("Sequencing batch for retry")
+		b.retryAfter = time.Now().Add(retryDelay)
+		log.WithFields(b.logFields()).Info("Sequencing batch for retry after delay")
 	case batchInit, batchNil:
 		b.firstScheduled = b.scheduled
 	}
@@ -190,8 +199,32 @@ func (b batch) availabilityStore() das.AvailabilityStore {
 	return b.bs.store
 }

+var batchBlockUntil = func(ctx context.Context, untilRetry time.Duration, b batch) error {
+	log.WithFields(b.logFields()).WithField("untilRetry", untilRetry.String()).
+		Debug("Sleeping for retry backoff delay")
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-time.After(untilRetry):
+		return nil
+	}
+}
+
+func (b batch) waitUntilReady(ctx context.Context) error {
+	// Wait to retry a failed batch to avoid hammering peers
+	// if we've hit a state where batches will consistently fail.
+	// Avoids spamming requests and logs.
+	if b.retries > 0 {
+		untilRetry := time.Until(b.retryAfter)
+		if untilRetry > time.Millisecond {
+			return batchBlockUntil(ctx, untilRetry, b)
+		}
+	}
+	return nil
+}
+
 func sortBatchDesc(bb []batch) {
 	sort.Slice(bb, func(i, j int) bool {
-		return bb[j].end < bb[i].end
+		return bb[i].end > bb[j].end
 	})
 }
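
`batchBlockUntil` is deliberately a package-level `var` rather than a plain function: it is a seam that the test in the next file reassigns so it can verify the backoff without real sleeps. A standalone sketch of the pattern:

```go
// Standalone sketch of the function-variable seam used by batchBlockUntil.
package main

import (
	"fmt"
	"time"
)

// sleepFn is the seam: production code calls it, tests reassign it.
var sleepFn = func(d time.Duration) { time.Sleep(d) }

func backoff(attempt int) {
	sleepFn(time.Duration(attempt) * time.Second)
}

func main() {
	var recorded time.Duration
	sleepFn = func(d time.Duration) { recorded = d } // test stub: no real sleep
	backoff(3)
	fmt.Println(recorded) // 3s
}
```
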
@@ -1,8 +1,11 @@
 package backfill

 import (
+	"context"
 	"testing"
+	"time"

+	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/testing/require"
 )
@@ -19,3 +22,22 @@ func TestSortBatchDesc(t *testing.T) {
 		require.Equal(t, orderOut[i], batches[i].end)
 	}
 }
+
+func TestWaitUntilReady(t *testing.T) {
+	b := batch{}.withState(batchErrRetryable)
+	require.Equal(t, time.Time{}, b.retryAfter)
+	var got time.Duration
+	wur := batchBlockUntil
+	var errDerp = errors.New("derp")
+	batchBlockUntil = func(_ context.Context, ur time.Duration, _ batch) error {
+		got = ur
+		return errDerp
+	}
+	// retries counter and timestamp are set when we mark the batch for sequencing, if it is in the retry state
+	b = b.withState(batchSequenced)
+	require.ErrorIs(t, b.waitUntilReady(context.Background()), errDerp)
+	require.Equal(t, true, retryDelay-time.Until(b.retryAfter) < time.Millisecond)
+	require.Equal(t, true, got < retryDelay && got > retryDelay-time.Millisecond)
+	require.Equal(t, 1, b.retries)
+	batchBlockUntil = wur
+}
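
One caveat with the test above: it restores `batchBlockUntil` with a trailing assignment, so if a `require` fails first, the stub leaks into later tests in the package. A hedged alternative (not part of this diff) would swap the seam via `t.Cleanup`, which runs even on failure:

```go
// Sketch of a leak-proof swap helper for the seam; this would live next to
// the test above and is not part of the actual change.
func swapBatchBlockUntil(t *testing.T, stub func(context.Context, time.Duration, batch) error) {
	t.Helper()
	orig := batchBlockUntil
	batchBlockUntil = stub
	t.Cleanup(func() { batchBlockUntil = orig })
}
```
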
@@ -143,6 +143,11 @@ func (p *p2pBatchWorkerPool) batchRouter(pa PeerAssigner) {
 			return
 		}
 		for _, pid := range assigned {
+			if err := todo[0].waitUntilReady(p.ctx); err != nil {
+				log.WithError(p.ctx.Err()).Info("p2pBatchWorkerPool context canceled, shutting down")
+				p.shutdown(p.ctx.Err())
+				return
+			}
 			busy[pid] = true
 			todo[0].busy = pid
 			p.toWorkers <- todo[0].withPeer(pid)
@@ -461,7 +461,7 @@ func (r *blobRange) Request() *p2ppb.BlobSidecarsByRangeRequest {
 	}
 	return &p2ppb.BlobSidecarsByRangeRequest{
 		StartSlot: r.low,
-		Count: uint64(r.high.SubSlot(r.low)) + 1,
+		Count: uint64(r.high.FlooredSubSlot(r.low)) + 1,
 	}
 }

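
Switching from `SubSlot` to `FlooredSubSlot` presumably guards the unsigned subtraction when `r.low` exceeds `r.high`: instead of wrapping around to a huge `Count`, the difference clamps to zero. A sketch of the presumed semantics (the real method lives on `primitives.Slot` and is not shown in this diff):

```go
// Illustration of flooring subtraction on an unsigned slot type; this is
// the presumed behavior of FlooredSubSlot, not Prysm's actual code.
package main

import "fmt"

type Slot uint64

func (s Slot) flooredSub(other Slot) Slot {
	if other > s {
		return 0
	}
	return s - other
}

func main() {
	fmt.Println(Slot(10).flooredSub(3)) // 7
	fmt.Println(Slot(3).flooredSub(10)) // 0, not a near-max uint64 from wraparound
}
```
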
@@ -482,7 +482,10 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
 func TestBlobValidatorFromRootReq(t *testing.T) {
 	rootA := bytesutil.PadTo([]byte("valid"), 32)
 	rootB := bytesutil.PadTo([]byte("invalid"), 32)
-	header := &ethpb.SignedBeaconBlockHeader{}
+	header := &ethpb.SignedBeaconBlockHeader{
+		Header: &ethpb.BeaconBlockHeader{Slot: 0},
+		Signature: make([]byte, fieldparams.BLSSignatureLength),
+	}
 	blobSidecarA0 := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(rootA), header, 0, []byte{}, make([][]byte, 0))
 	blobSidecarA1 := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(rootA), header, 1, []byte{}, make([][]byte, 0))
 	blobSidecarB0 := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(rootB), header, 0, []byte{}, make([][]byte, 0))
@@ -590,7 +593,8 @@ func TestBlobValidatorFromRangeReq(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			vf := blobValidatorFromRangeReq(c.req)
 			header := &ethpb.SignedBeaconBlockHeader{
-				Header: &ethpb.BeaconBlockHeader{Slot: c.responseSlot},
+				Header: &ethpb.BeaconBlockHeader{Slot: c.responseSlot},
+				Signature: make([]byte, fieldparams.BLSSignatureLength),
 			}
 			sc := util.GenerateTestDenebBlobSidecar(t, [32]byte{}, header, 0, []byte{}, make([][]byte, 0))
 			err := vf(sc)
@@ -46,10 +46,12 @@ func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.
 	if len(scs) == 0 {
 		return nil, nil
 	}
+	blkSig := blk.Signature()
 	// We assume the proposer is validated wrt the block in batch block processing before performing the DA check.
 	// So at this stage we just need to make sure the value being signed and signature bytes match the block.
 	for i := range scs {
-		if blk.Signature() != bytesutil.ToBytes96(scs[i].SignedBlockHeader.Signature) {
+		blobSig := bytesutil.ToBytes96(scs[i].SignedBlockHeader.Signature)
+		if blkSig != blobSig {
 			return nil, ErrBatchSignatureMismatch
 		}
 		// Extra defensive check to make sure the roots match. This should be unnecessary in practice since the root from
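
Hoisting `blk.Signature()` out of the loop saves repeated work, and the comparison itself is valid because Go arrays, unlike slices, support `==`. A small self-contained illustration:

```go
// Fixed-size arrays compare element-wise with ==, which is why converting
// both signatures to [96]byte values enables the direct check above.
package main

import "fmt"

func toBytes96(b []byte) [96]byte {
	var out [96]byte
	copy(out[:], b) // shorter input is zero-padded, as with bytesutil.ToBytes96
	return out
}

func main() {
	a := toBytes96([]byte{1, 2, 3})
	b := toBytes96([]byte{1, 2, 3})
	fmt.Println(a == b) // true
}
```
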
@@ -227,6 +227,12 @@ var (
 		Usage: "Sets the minimum number of peers that a node will attempt to peer with that are subscribed to a subnet.",
 		Value: 6,
 	}
+	// MaxConcurrentDials defines a flag to set the maximum number of peers that a node will attempt to dial with from discovery.
+	MaxConcurrentDials = &cli.Uint64Flag{
+		Name: "max-concurrent-dials",
+		Usage: "Sets the maximum number of peers that a node will attempt to dial with from discovery. By default we will dial as " +
+			"many peers as possible.",
+	}
 	// SuggestedFeeRecipient specifies the fee recipient for the transaction fees.
 	SuggestedFeeRecipient = &cli.StringFlag{
 		Name: "suggested-fee-recipient",
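
The new `Uint64Flag` sets no `Value`, so it defaults to zero; the next file then reads zero as "no cap". A minimal self-contained example of registering and reading such a flag with `urfave/cli/v2`:

```go
// Minimal cli/v2 example of the defaulting behavior relied on here.
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.Uint64Flag{
				Name:  "max-concurrent-dials",
				Usage: "maximum concurrent dials; 0 means unlimited",
			},
		},
		Action: func(c *cli.Context) error {
			if max := c.Uint64("max-concurrent-dials"); max > 0 {
				fmt.Printf("capping concurrent dials at %d\n", max)
			} else {
				fmt.Println("dialing without a concurrency cap")
			}
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```
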
@@ -11,6 +11,7 @@ type GlobalFlags struct {
 	SubscribeToAllSubnets bool
 	MinimumSyncPeers int
 	MinimumPeersPerSubnet int
+	MaxConcurrentDials int
 	BlockBatchLimit int
 	BlockBatchLimitBurstFactor int
 	BlobBatchLimit int
@@ -45,11 +46,17 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
 	cfg.BlobBatchLimit = ctx.Int(BlobBatchLimit.Name)
 	cfg.BlobBatchLimitBurstFactor = ctx.Int(BlobBatchLimitBurstFactor.Name)
 	cfg.MinimumPeersPerSubnet = ctx.Int(MinPeersPerSubnet.Name)
+	cfg.MaxConcurrentDials = ctx.Int(MaxConcurrentDials.Name)
 	configureMinimumPeers(ctx, cfg)

 	Init(cfg)
 }

+// MaxDialIsActive checks if the user has enabled the max dial flag.
+func MaxDialIsActive() bool {
+	return Get().MaxConcurrentDials > 0
+}
+
 func configureMinimumPeers(ctx *cli.Context, cfg *GlobalFlags) {
 	cfg.MinimumSyncPeers = ctx.Int(MinSyncPeers.Name)
 	maxPeers := ctx.Int(cmd.P2PMaxPeers.Name)
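
How the dial cap is ultimately consumed is not part of this diff; one plausible shape, gated on `MaxDialIsActive()`, is a buffered-channel semaphore around dial attempts. A sketch under those assumptions:

```go
// Hypothetical consumer of MaxConcurrentDials; the real p2p wiring is not
// shown in this diff. A nil semaphore means "uncapped", mirroring
// MaxDialIsActive() treating 0 as disabled.
package main

import "sync"

func dialAll(addrs []string, maxDials int, dial func(string)) {
	var sem chan struct{}
	if maxDials > 0 {
		sem = make(chan struct{}, maxDials)
	}
	var wg sync.WaitGroup
	for _, a := range addrs {
		if sem != nil {
			sem <- struct{}{} // blocks once maxDials dials are in flight
		}
		wg.Add(1)
		go func(addr string) {
			defer wg.Done()
			if sem != nil {
				defer func() { <-sem }()
			}
			dial(addr)
		}(a)
	}
	wg.Wait()
}

func main() {
	dialAll([]string{"a", "b", "c"}, 2, func(string) {})
}
```
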
@@ -6,10 +6,9 @@ go_library(
     importpath = "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/jwt",
     visibility = ["//visibility:public"],
     deps = [
+        "//api:go_default_library",
         "//cmd:go_default_library",
-        "//crypto/rand:go_default_library",
         "//io/file:go_default_library",
-        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
         "@com_github_urfave_cli_v2//:go_default_library",
     ],
@@ -1,12 +1,10 @@
 package jwt

 import (
-	"errors"
 	"path/filepath"

-	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/prysmaticlabs/prysm/v5/api"
 	"github.com/prysmaticlabs/prysm/v5/cmd"
-	"github.com/prysmaticlabs/prysm/v5/crypto/rand"
 	"github.com/prysmaticlabs/prysm/v5/io/file"
 	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli/v2"
@@ -52,7 +50,7 @@ func generateAuthSecretInFile(c *cli.Context) error {
 			return err
 		}
 	}
-	secret, err := generateRandomHexString()
+	secret, err := api.GenerateRandomHexString()
 	if err != nil {
 		return err
 	}
@@ -62,15 +60,3 @@
 	logrus.Infof("Successfully wrote JSON-RPC authentication secret to file %s", fileName)
 	return nil
 }
-
-func generateRandomHexString() (string, error) {
-	secret := make([]byte, 32)
-	randGen := rand.NewGenerator()
-	n, err := randGen.Read(secret)
-	if err != nil {
-		return "", err
-	} else if n <= 0 {
-		return "", errors.New("rand: unexpected length")
-	}
-	return hexutil.Encode(secret), nil
-}
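
The deleted local helper moves behind `api.GenerateRandomHexString` (note the BUILD change above shifting the `crypto/rand` and `hexutil` deps out of this package). The shared function is not shown in this diff, but it presumably mirrors the removed body:

```go
// Presumed shape of the shared helper, reconstructed from the deleted
// code above; the actual //api implementation is not shown in this diff.
package api

import (
	"errors"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/prysmaticlabs/prysm/v5/crypto/rand"
)

// GenerateRandomHexString returns 32 cryptographically random bytes as a
// 0x-prefixed hex string, suitable for a JWT secret.
func GenerateRandomHexString() (string, error) {
	secret := make([]byte, 32)
	randGen := rand.NewGenerator()
	n, err := randGen.Read(secret)
	if err != nil {
		return "", err
	} else if n <= 0 {
		return "", errors.New("rand: unexpected length")
	}
	return hexutil.Encode(secret), nil
}
```
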
@@ -72,6 +72,7 @@ var appFlags = []cli.Flag{
 	flags.WeakSubjectivityCheckpoint,
 	flags.Eth1HeaderReqLimit,
 	flags.MinPeersPerSubnet,
+	flags.MaxConcurrentDials,
 	flags.SuggestedFeeRecipient,
 	flags.TerminalTotalDifficultyOverride,
 	flags.TerminalBlockHashOverride,
Some files were not shown because too many files have changed in this diff.