Compare commits


8 Commits

Author  SHA1  Message  Date
Nishant Das  0928c532be  Merge branch 'develop' into addArchivalFlag  2024-03-14 18:35:21 +08:00
Nishant Das  e62a491092  Merge branch 'develop' into addArchivalFlag  2024-03-13 16:55:47 +08:00
Nishant Das  a8498dee75  Merge branch 'develop' into addArchivalFlag  2024-03-13 11:57:57 +08:00
nisdas  7f8105228e  gaz  2024-03-12 19:15:37 +08:00
Nishant Das  9c482322a3  Merge branch 'develop' into addArchivalFlag  2024-03-12 18:46:09 +08:00
nisdas  9c417675d2  gaz  2024-03-12 18:45:55 +08:00
nisdas  cdaaa6e1a6  add test  2024-03-12 18:41:09 +08:00
nisdas  02c67c0f7c  add this in  2024-03-12 17:37:24 +08:00
347 changed files with 6138 additions and 8417 deletions

.gitignore vendored
View File

@@ -41,6 +41,3 @@ jwt.hex
# manual testing
tmp
# spectest coverage reports
report.txt

View File

@@ -2,7 +2,7 @@
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.4.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.4.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.4.0)
[![Consensus_Spec_Version 1.3.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.3.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[![Execution_API_Version 1.0.0-beta.2](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.2-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/prysmaticlabs)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)

View File

@@ -29,7 +29,23 @@ http_archive(
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
zig_toolchains()
# Temporarily use a nightly build until 0.12.0 is released.
# See: https://github.com/prysmaticlabs/prysm/issues/13130
zig_toolchains(
host_platform_sha256 = {
"linux-aarch64": "45afb8e32adde825165f4f293fcea9ecea503f7f9ec0e9bf4435afe70e67fb70",
"linux-x86_64": "f136c6a8a0f6adcb057d73615fbcd6f88281b3593f7008d5f7ed514ff925c02e",
"macos-aarch64": "05d995853c05243151deff47b60bdc2674f1e794a939eaeca0f42312da031cee",
"macos-x86_64": "721754ba5a50f31e8a1f0e1a74cace26f8246576878ac4a8591b0ee7b6db1fc1",
"windows-x86_64": "93f5248b2ea8c5ee8175e15b1384e133edc1cd49870b3ea259062a2e04164343",
},
url_formats = [
"https://ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://mirror.bazel.build/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://prysmaticlabs.com/mirror/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
],
version = "0.12.0-dev.1349+fa022d1ec",
)
# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
@@ -114,9 +130,9 @@ aspect_bazel_lib_register_toolchains()
http_archive(
name = "rules_oci",
sha256 = "4a276e9566c03491649eef63f27c2816cc222f41ccdebd97d2c5159e84917c3b",
strip_prefix = "rules_oci-1.7.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.7.4/rules_oci-v1.7.4.tar.gz",
sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
strip_prefix = "rules_oci-1.3.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
)
load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")
@@ -227,7 +243,9 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.5.0-alpha.0"
consensus_spec_version = "v1.4.0-beta.7"
consensus_spec_test_version = "v1.4.0-beta.7-hotfix"
bls_test_version = "v0.1.1"
@@ -243,8 +261,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "33c5547772b6d8d6f041dff7e7d26b0358c2392daed34394a3aa81147812a81c",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
)
http_archive(
@@ -259,8 +277,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "06f286199cf2fedd4700487fb8feb0904e0ae18daaa4b3f70ea430ca9c388167",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
)
http_archive(
@@ -275,8 +293,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "5f2a4452b323075eba6bf950003f7d91fd04ebcbde5bd087beafb5d6f6325ad4",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
)
http_archive(
@@ -290,7 +308,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "fd7e83e8cbeb3e297f2aeb93776305f7d606272c97834d8d9be673984501ed36",
sha256 = "049c29267310e6b88280f4f834a75866c2f5b9036fa97acb9d9c6db8f64d9118",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -326,6 +344,22 @@ filegroup(
url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
)
http_archive(
name = "goerli_testnet",
build_file_content = """
filegroup(
name = "configs",
srcs = [
"prater/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
sha256 = "43fc0f55ddff7b511713e2de07aa22846a67432df997296fb4fc09cd8ed1dcdb",
strip_prefix = "goerli-6522ac6684693740cd4ddcc2a0662e03702aa4a1",
url = "https://github.com/eth-clients/goerli/archive/6522ac6684693740cd4ddcc2a0662e03702aa4a1.tar.gz",
)
http_archive(
name = "holesky_testnet",
build_file_content = """

View File

@@ -1,24 +1,11 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"constants.go",
"headers.go",
"jwt.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api",
visibility = ["//visibility:public"],
deps = [
"//crypto/rand:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["jwt_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
)

View File

@@ -304,8 +304,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
@@ -343,8 +341,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
@@ -383,8 +379,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {

View File

@@ -321,8 +321,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
@@ -349,8 +347,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
@@ -380,8 +376,6 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
var req structs.SignedBlindedBeaconBlockDeneb
err := json.NewDecoder(r.Body).Decode(&req)
require.NoError(t, err)

View File

@@ -4,6 +4,4 @@ const (
WebUrlPrefix = "/v2/validator/"
WebApiUrlPrefix = "/api/v2/validator/"
KeymanagerApiPrefix = "/eth/v1"
AuthTokenFileName = "auth-token"
)

View File

@@ -1,32 +0,0 @@
package api
import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
)
// GenerateRandomHexString generates a random hex string that follows the
// standards for a JWT token, used for the beacon node -> execution client
// and web client -> validator client connections.
func GenerateRandomHexString() (string, error) {
secret := make([]byte, 32)
randGen := rand.NewGenerator()
n, err := randGen.Read(secret)
if err != nil {
return "", err
} else if n != 32 {
return "", errors.New("rand: unexpected length")
}
return hexutil.Encode(secret), nil
}
// ValidateAuthToken validates an auth token for the web client.
func ValidateAuthToken(token string) error {
b, err := hexutil.Decode(token)
// token should be hex-encoded and at least 256 bits
if err != nil || len(b) < 32 {
return errors.New("invalid auth token: token should be hex-encoded and at least 256 bits")
}
return nil
}
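
The two helpers above pair naturally: generate a token once, then validate it before use. A minimal sketch of that flow, assuming the import path shown in the diff (the main wrapper is ours, not part of this change):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/api"
)

func main() {
	// Generate a 32-byte, hex-encoded secret suitable for JWT auth.
	token, err := api.GenerateRandomHexString()
	if err != nil {
		panic(err)
	}
	// Reject anything that is not hex-encoded or is shorter than 256 bits.
	if err := api.ValidateAuthToken(token); err != nil {
		panic(err)
	}
	fmt.Println("token ok:", token)
}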

View File

@@ -1,13 +0,0 @@
package api
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestGenerateRandomHexString(t *testing.T) {
token, err := GenerateRandomHexString()
require.NoError(t, err)
require.NoError(t, ValidateAuthToken(token))
}

View File

@@ -88,7 +88,7 @@ func TestToggle(t *testing.T) {
}
}
func TestToggleMultipleTimes(t *testing.T) {
func TestToogleMultipleTimes(t *testing.T) {
t.Parallel()
v := New()
@@ -101,16 +101,16 @@ func TestToggleMultipleTimes(t *testing.T) {
expected := i%2 != 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toggle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
t.Fatalf("AtomicBool.Toogle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
}
if pre == v.IsSet() {
t.Fatalf("AtomicBool.Toggle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
t.Fatalf("AtomicBool.Toogle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
}
}
}
func TestToggleAfterOverflow(t *testing.T) {
func TestToogleAfterOverflow(t *testing.T) {
t.Parallel()
var value int32 = math.MaxInt32
@@ -122,7 +122,7 @@ func TestToggleAfterOverflow(t *testing.T) {
v.Toggle()
expected := math.MaxInt32%2 == 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toggle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toogle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
}
// make sure overflow happened
@@ -135,7 +135,7 @@ func TestToggleAfterOverflow(t *testing.T) {
v.Toggle()
expected = !expected
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toggle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toogle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
}
}

View File

@@ -20,7 +20,6 @@ package event
import (
"errors"
"reflect"
"slices"
"sync"
)
@@ -220,9 +219,12 @@ type caseList []reflect.SelectCase
// find returns the index of a case containing the given channel.
func (cs caseList) find(channel interface{}) int {
return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
return selectCase.Chan.Interface() == channel
})
for i, cas := range cs {
if cas.Chan.Interface() == channel {
return i
}
}
return -1
}
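
Both versions of find return the index of the case holding the given channel; the slices.IndexFunc form and the explicit loop are behaviorally identical. A standalone sketch of that equivalence, using the standard library only (Go 1.21+ for slices):

package main

import (
	"fmt"
	"slices"
)

func main() {
	cs := []string{"a", "b", "c"}
	// Explicit loop, as in the loop-based version of find.
	manual := -1
	for i, c := range cs {
		if c == "b" {
			manual = i
			break
		}
	}
	// slices.IndexFunc, as in the other version.
	idx := slices.IndexFunc(cs, func(c string) bool { return c == "b" })
	fmt.Println(manual, idx) // 1 1
}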
// delete removes the given case from cs.

View File

@@ -63,7 +63,7 @@ func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, err
return results, nil
}
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelization.
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelisation.
func calculateChunkSize(items int) int {
// Start with a simple even split
chunkSize := items / runtime.GOMAXPROCS(0)
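
The even split shown above is only the starting point; the rest of calculateChunkSize (not shown in this hunk) adjusts the result further. A hedged illustration of the basic split, with a guard of our own for the small-input case:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	items := 1024
	// Start with a simple even split across available CPUs.
	chunkSize := items / runtime.GOMAXPROCS(0)
	if chunkSize == 0 {
		chunkSize = 1 // our assumption: never return a zero chunk for items < CPU count
	}
	fmt.Println("chunk size:", chunkSize)
}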

View File

@@ -2,7 +2,7 @@
# This script serves as a wrapper around bazel to limit the scope of environment variables that
# may change the action output. Using this script should result in a higher cache hit ratio for
# cached actions with a more hermetic build.
# cached actions with a more heremtic build.
env -i \
PATH=/usr/bin:/bin \

View File

@@ -137,7 +137,7 @@ go_test(
"//async/event:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -398,6 +399,14 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
@@ -509,6 +518,13 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
return ar[:], nil
}
// SetOptimisticToInvalid wraps the corresponding method in forkchoice
func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
}
// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
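
The two added wrappers follow one pattern: acquire the forkchoice store's lock (read lock for queries, write lock for mutations), defer the unlock, and delegate. A generic sketch of that pattern with made-up names, not the actual Service types:

package main

import "sync"

type store struct {
	mu    sync.RWMutex
	nodes map[[32]byte]bool
}

// Query wrapper: read lock, as in IsViableForCheckpoint above.
func (s *store) has(root [32]byte) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.nodes[root]
}

// Mutation wrapper: write lock, as in SetOptimisticToInvalid above.
func (s *store) markInvalid(root [32]byte) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.nodes, root)
}

func main() {
	s := &store{nodes: map[[32]byte]bool{}}
	s.markInvalid([32]byte{1})
	_ = s.has([32]byte{1})
}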

View File

@@ -256,7 +256,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
if err != nil {
return err
}

View File

@@ -61,7 +61,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee should be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
// NextSyncCommittee should be be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
require.NotEqual(t, 0, len(indices))
}

View File

@@ -82,20 +82,19 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
"deposits": len(block.Body().Deposits()),
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"deposits": len(block.Body().Deposits()),
}
log.WithFields(lf).Debug("Synced new block")
} else {

View File

@@ -18,63 +18,17 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch {
return nil
}
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err != nil {
return nil
}
if slots.ToEpoch(targetSlot)+1 < headEpoch {
return nil
}
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
}
return st
}
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
return nil
}
// Try if we have already set the checkpoint cache
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
lock := async.NewMultilock(string(c.Root) + epochKey)
lock.Lock()
defer lock.Unlock()
cachedState, err := s.checkpointStateCache.StateByCheckpoint(c)
if err != nil {
return nil
}
if cachedState != nil && !cachedState.IsNil() {
return cachedState
}
st, err := s.HeadState(ctx)
if err != nil {
return nil
}
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, c.Root, slot)
if err != nil {
return nil
}
if err := s.checkpointStateCache.AddCheckpointState(c, st); err != nil {
return nil
}
return st
}
// getAttPreState retrieves the att pre state by either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
if st := s.getRecentPreState(ctx, c); st != nil {
return st, nil
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return s.HeadStateReadOnly(ctx)
}
}
}
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.

View File

@@ -146,28 +146,6 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}
func TestService_GetRecentPreState(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
}
func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()

View File

@@ -6,7 +6,6 @@ import (
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -559,20 +558,6 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
// The gossip handler for blobs writes the index of each verified blob referencing the given
// root to the channel returned by blobNotifiers.forRoot.
nc := s.blobNotifiers.forRoot(root)
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
nst := time.AfterFunc(time.Until(nextSlot), func() {
if len(missing) == 0 {
return
}
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
Error("Still waiting for DA check at slot end.")
})
defer nst.Stop()
}
for {
select {
case idx := <-nc:
@@ -586,20 +571,11 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
s.blobNotifiers.delete(root)
return nil
case <-ctx.Done():
return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
}
}
}
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": missing,
}
}
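
The removed logging relied on a common Go idiom: schedule a timer callback for a future instant, and cancel it with Stop if the surrounding work finishes first. A standalone sketch of that idiom, with timings of our own choosing:

package main

import (
	"fmt"
	"time"
)

func main() {
	deadline := time.Now().Add(50 * time.Millisecond)
	// Fire only if we are still waiting when the deadline passes.
	t := time.AfterFunc(time.Until(deadline), func() {
		fmt.Println("still waiting at slot end")
	})
	defer t.Stop() // cancels the callback if the work completes in time
	// ... do the actual waiting here ...
	time.Sleep(10 * time.Millisecond) // finishes before the deadline; nothing is logged
}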
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot

View File

@@ -60,7 +60,7 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
// logNonCanonicalBlockReceived prints a message informing that the received
// block is not the head of the chain. It requires the caller holds a lock on
// Forkchoice.
// Foprkchoice.
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {

View File

@@ -170,7 +170,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// Send finalized events and finalized deposits in the background
if newFinalized {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go s.sendNewFinalizedEvent(ctx, postState)
go s.sendNewFinalizedEvent(blockCopy, postState)
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
@@ -443,7 +443,7 @@ func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postS
// sendNewFinalizedEvent sends a new finalization checkpoint event over the
// event feed. It needs to be called in the background
func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.BeaconState) {
func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState) {
isValidPayload := false
s.headLock.RLock()
if s.head != nil {
@@ -451,17 +451,8 @@ func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.Bea
}
s.headLock.RUnlock()
blk, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root))
if err != nil {
log.WithError(err).Error("Could not retrieve block for finalized checkpoint root. Finalized event will not be emitted")
return
}
if blk == nil || blk.IsNil() || blk.Block() == nil || blk.Block().IsNil() {
log.WithError(err).Error("Block retrieved for finalized checkpoint root is nil. Finalized event will not be emitted")
return
}
stateRoot := blk.Block().StateRoot()
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
@@ -497,10 +488,7 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
if err != nil {
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
s.cfg.ForkChoiceStore.Unlock()
return false, err
return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {

View File

@@ -8,14 +8,12 @@ import (
blockchainTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -380,38 +378,3 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
require.Equal(t, false, pool.ValidatorExists(idx))
})
}
func Test_sendNewFinalizedEvent(t *testing.T) {
s, _ := minimalTestService(t)
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
s.cfg.StateNotifier = notifier
finalizedSt, err := util.NewBeaconState()
require.NoError(t, err)
finalizedStRoot, err := finalizedSt.HashTreeRoot(s.ctx)
require.NoError(t, err)
b := util.NewBeaconBlock()
b.Block.StateRoot = finalizedStRoot[:]
sbb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
sbbRoot, err := sbb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveBlock(s.ctx, sbb))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetFinalizedCheckpoint(&ethpb.Checkpoint{
Epoch: 123,
Root: sbbRoot[:],
}))
s.sendNewFinalizedEvent(s.ctx, st)
require.Equal(t, 1, len(notifier.ReceivedEvents()))
e := notifier.ReceivedEvents()[0]
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
require.Equal(t, true, ok, "event has wrong data type")
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
assert.DeepEqual(t, sbbRoot[:], fc.Block)
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
assert.Equal(t, false, fc.ExecutionOptimistic)
}

View File

@@ -290,10 +290,18 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
if params.BeaconConfig().ConfigName != params.PraterName {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
} else {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")

View File

@@ -9,7 +9,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
@@ -80,7 +80,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
require.NoError(t, err)
depositCache, err := depositsnapshot.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
fc := doublylinkedtree.New()

View File

@@ -8,7 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/async/event"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
@@ -79,7 +79,7 @@ type testServiceRequirements struct {
attPool attestations.Pool
attSrv *attestations.Service
blsPool *blstoexec.Pool
dc *depositsnapshot.Cache
dc *depositcache.DepositCache
}
func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
@@ -94,7 +94,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
attSrv, err := attestations.NewService(ctx, &attestations.Config{Pool: attPool})
require.NoError(t, err)
blsPool := blstoexec.NewPool()
dc, err := depositsnapshot.New()
dc, err := depositcache.New()
require.NoError(t, err)
req := &testServiceRequirements{
ctx: ctx,

View File

@@ -0,0 +1,50 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"deposits_cache.go",
"log.go",
"pending_deposits.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache",
visibility = [
"//beacon-chain:__subpackages__",
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/cache:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"deposits_cache_test.go",
"pending_deposits_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/cache:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -0,0 +1,327 @@
// Package depositcache is the source of validator deposits maintained
// in-memory by the beacon node. Deposits processed from the
// eth1 powchain are then stored in this cache to be accessed by
// any other service during a beacon node's runtime.
package depositcache
import (
"context"
"encoding/hex"
"math/big"
"sort"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/container/trie"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
var (
historicalDepositsCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacondb_all_deposits",
Help: "The number of total deposits in the beaconDB in-memory database",
})
)
// FinalizedDeposits stores the trie of deposits that have been included
// in the beacon state up to the latest finalized checkpoint.
type FinalizedDeposits struct {
deposits *trie.SparseMerkleTrie
merkleTrieIndex int64
}
// DepositCache stores all in-memory deposit objects. This
// stores all the deposit related data that is required by the beacon-node.
type DepositCache struct {
// Beacon chain deposits in memory.
pendingDeposits []*ethpb.DepositContainer
deposits []*ethpb.DepositContainer
finalizedDeposits FinalizedDeposits
depositsByKey map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer
depositsLock sync.RWMutex
}
// New instantiates a new deposit cache
func New() (*DepositCache, error) {
finalizedDepositsTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
return nil, err
}
// finalizedDeposits.merkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
// Inserting the first item into the trie will set the value of the index to 0.
return &DepositCache{
pendingDeposits: []*ethpb.DepositContainer{},
deposits: []*ethpb.DepositContainer{},
depositsByKey: map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer{},
finalizedDeposits: FinalizedDeposits{deposits: finalizedDepositsTrie, merkleTrieIndex: -1},
}, nil
}
// InsertDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
"index": index,
"depositRoot": hex.EncodeToString(depositRoot[:]),
}).Warn("Ignoring nil deposit insertion")
return errors.New("nil deposit inserted into the cache")
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
if int(index) != len(dc.deposits) {
return errors.Errorf("wanted deposit with index %d to be inserted but received %d", len(dc.deposits), index)
}
// Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
depCtr := &ethpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}
newDeposits := append(
[]*ethpb.DepositContainer{depCtr},
dc.deposits[heightIdx:]...)
dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
// Append the deposit to our map; in the event no deposits
// exist for the pubkey, it is simply added to the map.
pubkey := bytesutil.ToBytes48(d.Data.PublicKey)
dc.depositsByKey[pubkey] = append(dc.depositsByKey[pubkey], depCtr)
historicalDepositsCount.Inc()
return nil
}
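
InsertDeposit keeps the slice ordered by Index, using sort.Search to find the insertion point and avoid a full sort on retrieval. The same idiom in isolation, with plain ints for brevity:

package main

import (
	"fmt"
	"sort"
)

func main() {
	xs := []int{1, 3, 7}
	v := 5
	// Find the first position whose element is >= v, as InsertDeposit does with Index.
	i := sort.Search(len(xs), func(k int) bool { return xs[k] >= v })
	// Splice v in at position i, keeping the slice sorted.
	xs = append(xs[:i], append([]int{v}, xs[i:]...)...)
	fmt.Println(xs) // [1 3 5 7]
}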
// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
dc.deposits = ctrs
for _, c := range ctrs {
// Use a new value, as the reference
// of c changes in the next iteration.
newPtr := c
pKey := bytesutil.ToBytes48(newPtr.Deposit.Data.PublicKey)
dc.depositsByKey[pKey] = append(dc.depositsByKey[pKey], newPtr)
}
historicalDepositsCount.Add(float64(len(ctrs)))
}
// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context,
eth1DepositIndex int64, _ common.Hash, _ uint64) error {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
depositTrie := dc.finalizedDeposits.Deposits()
insertIndex := int(dc.finalizedDeposits.merkleTrieIndex + 1)
// Don't insert into finalized trie if there is no deposit to
// insert.
if len(dc.deposits) == 0 {
return nil
}
// In the event we have fewer deposits than requested to
// finalize, we finalize up to the last index we do have.
if len(dc.deposits) <= int(eth1DepositIndex) {
eth1DepositIndex = int64(len(dc.deposits)) - 1
}
// If we finalize to some lower deposit index, we
// ignore it.
if int(eth1DepositIndex) < insertIndex {
return nil
}
for _, d := range dc.deposits {
if d.Index <= dc.finalizedDeposits.merkleTrieIndex {
continue
}
if d.Index > eth1DepositIndex {
break
}
depHash, err := d.Deposit.Data.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash deposit data")
}
if err = depositTrie.Insert(depHash[:], insertIndex); err != nil {
return errors.Wrap(err, "could not insert deposit hash")
}
insertIndex++
}
tree, ok := depositTrie.(*trie.SparseMerkleTrie)
if !ok {
return errors.New("not a sparse merkle tree")
}
dc.finalizedDeposits = FinalizedDeposits{
deposits: tree,
merkleTrieIndex: eth1DepositIndex,
}
return nil
}
// AllDepositContainers returns all historical deposit containers.
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
_, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
// Make a shallow copy of the deposits and return that. This way, the
// caller can safely iterate over the returned list of deposits without
// the possibility of new deposits showing up. If we were to return the
// list without a copy, when a new deposit is added to the cache, it
// would also be present in the returned value. This could result in a
// race condition if the list is being iterated over.
//
// It's not necessary to make a deep copy of this list because the
// deposits in the cache should never be modified. It is still possible
// for the caller to modify one of the underlying deposits and modify
// the cache, but that's not a race condition. Also, a deep copy would
// take too long and use too much memory.
deposits := make([]*ethpb.DepositContainer, len(dc.deposits))
copy(deposits, dc.deposits)
return deposits
}
// AllDeposits returns a list of historical deposits until the given block number
// (inclusive). If no block is specified then this method returns all historical deposits.
func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
return dc.allDeposits(untilBlk)
}
func (dc *DepositCache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
var deposits []*ethpb.Deposit
for _, ctnr := range dc.deposits {
if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
deposits = append(deposits, ctnr.Deposit)
}
}
return deposits
}
// DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
// root that corresponds to the latest deposit at that blockheight.
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
_, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Eth1BlockHeight > blockHeight.Uint64() })
// Send the deposit root of the empty trie if the eth1 follow distance
// is greater than the time of the earliest deposit.
if heightIdx == 0 {
return 0, [32]byte{}
}
return uint64(heightIdx), bytesutil.ToBytes32(dc.deposits[heightIdx-1].DepositRoot)
}
// DepositByPubkey looks through historical deposits and finds one which contains
// a certain public key within its deposit data.
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
_, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
var deposit *ethpb.Deposit
var blockNum *big.Int
deps, ok := dc.depositsByKey[bytesutil.ToBytes48(pubKey)]
if !ok || len(deps) == 0 {
return deposit, blockNum
}
// We always return the first deposit if a particular
// validator key has multiple deposits assigned to
// it.
deposit = deps[0].Deposit
blockNum = big.NewInt(int64(deps[0].Eth1BlockHeight))
return deposit, blockNum
}
// FinalizedDeposits returns the finalized deposits trie.
func (dc *DepositCache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits, error) {
_, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
return &FinalizedDeposits{
deposits: dc.finalizedDeposits.deposits.Copy(),
merkleTrieIndex: dc.finalizedDeposits.merkleTrieIndex,
}, nil
}
// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
_, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
if dc.finalizedDeposits.Deposits() == nil {
return dc.allDeposits(untilBlk)
}
var deposits []*ethpb.Deposit
for _, d := range dc.deposits {
if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
deposits = append(deposits, d.Deposit)
}
}
return deposits
}
// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
_, span := trace.StartSpan(ctx, "DepositsCache.PruneProofs")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
if untilDepositIndex >= int64(len(dc.deposits)) {
untilDepositIndex = int64(len(dc.deposits) - 1)
}
for i := untilDepositIndex; i >= 0; i-- {
// Finding a nil proof means that all proofs up to this deposit have been already pruned.
if dc.deposits[i].Deposit.Proof == nil {
break
}
dc.deposits[i].Deposit.Proof = nil
}
return nil
}
// Deposits returns the cached internal deposit tree.
func (fd *FinalizedDeposits) Deposits() cache.MerkleTree {
if fd.deposits != nil {
return fd.deposits
}
return nil
}
// MerkleTrieIndex represents the last finalized index in
// the finalized deposit container.
func (fd *FinalizedDeposits) MerkleTrieIndex() int64 {
return fd.merkleTrieIndex
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,5 @@
package depositcache
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "depositcache")

View File

@@ -0,0 +1,151 @@
package depositcache
import (
"context"
"math/big"
"sort"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
var (
pendingDepositsCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacondb_pending_deposits",
Help: "The number of pending deposits in the beaconDB in-memory database",
})
)
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
// which have not yet been included in the chain.
type PendingDepositsFetcher interface {
PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
}
// InsertPendingDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
}).Debug("Ignoring nil deposit insertion")
return
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
dc.pendingDeposits = append(dc.pendingDeposits,
&ethpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
pendingDepositsCount.Inc()
span.AddAttributes(trace.Int64Attribute("count", int64(len(dc.pendingDeposits))))
}
// PendingDeposits returns a list of deposits until the given block number
// (inclusive). If no block is specified then this method returns all pending
// deposits.
func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
defer span.End()
depositCntrs := dc.PendingContainers(ctx, untilBlk)
deposits := make([]*ethpb.Deposit, 0, len(depositCntrs))
for _, dep := range depositCntrs {
deposits = append(deposits, dep.Deposit)
}
return deposits
}
// PendingContainers returns a list of deposit containers until the given block number
// (inclusive).
func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
_, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
depositCntrs := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
for _, ctnr := range dc.pendingDeposits {
if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
depositCntrs = append(depositCntrs, ctnr)
}
}
// Sort the deposits by Merkle index.
sort.SliceStable(depositCntrs, func(i, j int) bool {
return depositCntrs[i].Index < depositCntrs[j].Index
})
span.AddAttributes(trace.Int64Attribute("count", int64(len(depositCntrs))))
return depositCntrs
}
// RemovePendingDeposit from the database. The deposit is indexed by the
// Index. This method does nothing if deposit ptr is nil.
func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Deposit) {
_, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
defer span.End()
if d == nil {
log.Debug("Ignoring nil deposit removal")
return
}
depRoot, err := hash.Proto(d)
if err != nil {
log.WithError(err).Error("Could not remove deposit")
return
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
idx := -1
for i, ctnr := range dc.pendingDeposits {
h, err := hash.Proto(ctnr.Deposit)
if err != nil {
log.WithError(err).Error("Could not hash deposit")
continue
}
if h == depRoot {
idx = i
break
}
}
if idx >= 0 {
dc.pendingDeposits = append(dc.pendingDeposits[:idx], dc.pendingDeposits[idx+1:]...)
pendingDepositsCount.Dec()
}
}
// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
_, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
defer span.End()
if merkleTreeIndex == 0 {
log.Debug("Ignoring 0 deposit removal")
return
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
cleanDeposits := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
for _, dp := range dc.pendingDeposits {
if dp.Index >= merkleTreeIndex {
cleanDeposits = append(cleanDeposits, dp)
}
}
dc.pendingDeposits = cleanDeposits
pendingDepositsCount.Set(float64(len(dc.pendingDeposits)))
}

View File

@@ -1,32 +1,82 @@
package depositsnapshot
package depositcache
import (
"context"
"math/big"
"testing"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"google.golang.org/protobuf/proto"
)
var _ PendingDepositsFetcher = (*Cache)(nil)
var _ PendingDepositsFetcher = (*DepositCache)(nil)
func TestInsertPendingDeposit_OK(t *testing.T) {
dc := Cache{}
dc := DepositCache{}
dc.InsertPendingDeposit(context.Background(), &ethpb.Deposit{}, 111, 100, [32]byte{})
assert.Equal(t, 1, len(dc.pendingDeposits), "deposit not inserted")
}
func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
dc := Cache{}
dc := DepositCache{}
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
assert.Equal(t, 0, len(dc.pendingDeposits))
}
func TestRemovePendingDeposit_OK(t *testing.T) {
db := DepositCache{}
proof1 := makeDepositProof()
proof1[0] = bytesutil.PadTo([]byte{'A'}, 32)
proof2 := makeDepositProof()
proof2[0] = bytesutil.PadTo([]byte{'A'}, 32)
data := &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Amount: 0,
Signature: make([]byte, 96),
}
depToRemove := &ethpb.Deposit{Proof: proof1, Data: data}
otherDep := &ethpb.Deposit{Proof: proof2, Data: data}
db.pendingDeposits = []*ethpb.DepositContainer{
{Deposit: depToRemove, Index: 1},
{Deposit: otherDep, Index: 5},
}
db.RemovePendingDeposit(context.Background(), depToRemove)
if len(db.pendingDeposits) != 1 || !proto.Equal(db.pendingDeposits[0].Deposit, otherDep) {
t.Error("Failed to remove deposit")
}
}
func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
dc := DepositCache{}
dc.pendingDeposits = []*ethpb.DepositContainer{{Deposit: &ethpb.Deposit{}}}
dc.RemovePendingDeposit(context.Background(), nil /*deposit*/)
assert.Equal(t, 1, len(dc.pendingDeposits), "deposit unexpectedly removed")
}
func TestPendingDeposit_RoundTrip(t *testing.T) {
dc := DepositCache{}
proof := makeDepositProof()
proof[0] = bytesutil.PadTo([]byte{'A'}, 32)
data := &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Amount: 0,
Signature: make([]byte, 96),
}
dep := &ethpb.Deposit{Proof: proof, Data: data}
dc.InsertPendingDeposit(context.Background(), dep, 111, 100, [32]byte{})
dc.RemovePendingDeposit(context.Background(), dep)
assert.Equal(t, 0, len(dc.pendingDeposits), "Failed to insert & delete a pending deposit")
}
func TestPendingDeposits_OK(t *testing.T) {
dc := Cache{}
dc := DepositCache{}
dc.pendingDeposits = []*ethpb.DepositContainer{
{Eth1BlockHeight: 2, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}},
@@ -46,7 +96,7 @@ func TestPendingDeposits_OK(t *testing.T) {
}
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
dc := Cache{}
dc := DepositCache{}
dc.pendingDeposits = []*ethpb.DepositContainer{
{Eth1BlockHeight: 2, Index: 2},
@@ -70,7 +120,7 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
}
func TestPrunePendingDeposits_OK(t *testing.T) {
dc := Cache{}
dc := DepositCache{}
dc.pendingDeposits = []*ethpb.DepositContainer{
{Eth1BlockHeight: 2, Index: 2},

View File

@@ -34,7 +34,6 @@ go_test(
name = "go_default_test",
srcs = [
"deposit_cache_test.go",
"deposit_fetcher_test.go",
"deposit_tree_snapshot_test.go",
"merkle_tree_test.go",
"spec_test.go",

View File

@@ -784,7 +784,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)
// Perform this in a nonsensical ordering
// Perform this in a non-sensical ordering
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)

View File

@@ -262,12 +262,6 @@ func toFinalizedDepositsContainer(deposits *DepositTree, index int64) finalizedD
}
}
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
// which have not yet been included in the chain.
type PendingDepositsFetcher interface {
PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
}
// PendingDeposits returns a list of deposits until the given block number
// (inclusive). If no block is specified then this method returns all pending
// deposits.

View File

@@ -99,23 +99,11 @@ func (d *DepositTree) getProof(index uint64) ([32]byte, [][32]byte, error) {
if d.depositCount <= 0 {
return [32]byte{}, nil, ErrInvalidDepositCount
}
if index >= d.depositCount {
return [32]byte{}, nil, ErrInvalidIndex
}
finalizedDeposits, _ := d.tree.GetFinalized([][32]byte{})
finalizedIdx := -1
if finalizedDeposits != 0 {
fd, err := math.Int(finalizedDeposits)
if err != nil {
return [32]byte{}, nil, err
}
finalizedIdx = fd - 1
finalizedDeposits = finalizedDeposits - 1
}
i, err := math.Int(index)
if err != nil {
return [32]byte{}, nil, err
}
if finalizedDeposits > 0 && i <= finalizedIdx {
if index <= finalizedDeposits {
return [32]byte{}, nil, ErrInvalidIndex
}
leaf, proof := generateProof(d.tree, index, DepositContractDepth)

View File

@@ -109,6 +109,10 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
// MarkInProgress a request so that any other similar requests will block on
// Get until MarkNotInProgress is called.
func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
if c.disabled {
return nil
}
c.lock.Lock()
defer c.lock.Unlock()
@@ -122,6 +126,10 @@ func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
// MarkNotInProgress will release the lock on a given request. This should be
// called after put.
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) {
if c.disabled {
return
}
c.lock.Lock()
defer c.lock.Unlock()
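
Both added guards short-circuit before touching the mutex, so a disabled cache costs a single field read per call. A sketch of the guard pattern with a hypothetical type (the real SkipSlotCache may protect the flag differently):

package main

import (
	"fmt"
	"sync"
)

type skipCache struct {
	disabled bool // assumed plain bool for this sketch
	lock     sync.Mutex
	inProg   map[[32]byte]bool
}

func (c *skipCache) markInProgress(r [32]byte) {
	if c.disabled {
		return // no bookkeeping at all when disabled, as in the diff above
	}
	c.lock.Lock()
	defer c.lock.Unlock()
	c.inProg[r] = true
}

func main() {
	c := &skipCache{disabled: true, inProg: map[[32]byte]bool{}}
	c.markInProgress([32]byte{'a'})
	fmt.Println(len(c.inProg)) // 0: nothing recorded while disabled
}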

View File

@@ -2,7 +2,6 @@ package cache_test
import (
"context"
"sync"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
@@ -36,28 +35,3 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
require.NoError(t, err)
assert.DeepEqual(t, res.ToProto(), s.ToProto(), "Expected equal protos to return from cache")
}
func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
ctx := context.Background()
c := cache.NewSkipSlotCache()
r := [32]byte{'a'}
c.Disable()
require.NoError(t, c.MarkInProgress(r))
c.Enable()
wg := new(sync.WaitGroup)
wg.Add(1)
go func() {
// Get call will only terminate when
// it is no longer in progress.
obj, err := c.Get(ctx, r)
require.NoError(t, err)
require.IsNil(t, obj)
wg.Done()
}()
c.MarkNotInProgress(r)
wg.Wait()
}


@@ -50,8 +50,6 @@ go_test(
"attestation_test.go",
"beacon_committee_test.go",
"block_test.go",
"private_access_fuzz_noop_test.go", # keep
"private_access_test.go",
"randao_test.go",
"rewards_penalties_test.go",
"shuffle_test.go",


@@ -12,6 +12,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
)
var (
@@ -132,6 +133,9 @@ func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx primitives.
//
// Thus, the attestation must be within the range of 95 to 102 in the example above.
func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(attSlot, uint64(genesisTime.Unix())); err != nil {
return err
}
attTime, err := slots.ToTime(uint64(genesisTime.Unix()), attSlot)
if err != nil {
return err
@@ -178,15 +182,24 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
}
// EIP-7045: Starting in Deneb, allow any attestations from the current or previous epoch.
currentEpoch := slots.ToEpoch(currentSlot)
if attEpoch+1 < currentEpoch {
prevEpoch, err := currentEpoch.SafeSub(1)
if err != nil {
log.WithError(err).Debug("Ignoring underflow for a deneb attestation inclusion check in epoch 0")
prevEpoch = 0
}
attSlotEpoch := slots.ToEpoch(attSlot)
if attSlotEpoch != currentEpoch && attSlotEpoch != prevEpoch {
attError = fmt.Errorf(
"attestation epoch %d not within current epoch %d or previous epoch",
attEpoch,
"attestation epoch %d not within current epoch %d or previous epoch %d",
attSlot/params.BeaconConfig().SlotsPerEpoch,
currentEpoch,
prevEpoch,
)
return errors.Join(ErrTooLate, attError)
}
return nil
}
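
The new branch above swaps the attEpoch+1 arithmetic for an explicit previous-epoch computation guarded against underflow at epoch 0, and the error now reports both accepted epochs. A compact sketch of the EIP-7045 window check, assuming plain uint64 epochs in place of the primitives types:

package main

import "fmt"

// validDenebAttestationEpoch mirrors the window above: from Deneb onward
// an attestation is timely if its epoch is the current or the previous
// epoch. The zero check stands in for Epoch.SafeSub(1), whose underflow
// error is logged and clamped to 0 in the real code.
func validDenebAttestationEpoch(attEpoch, currentEpoch uint64) error {
	prevEpoch := uint64(0)
	if currentEpoch > 0 {
		prevEpoch = currentEpoch - 1
	}
	if attEpoch != currentEpoch && attEpoch != prevEpoch {
		return fmt.Errorf(
			"attestation epoch %d not within current epoch %d or previous epoch %d",
			attEpoch, currentEpoch, prevEpoch,
		)
	}
	return nil
}

func main() {
	fmt.Println(validDenebAttestationEpoch(14, 15)) // <nil>: previous epoch is timely
	fmt.Println(validDenebAttestationEpoch(8, 15))  // error, as in the updated test below
	fmt.Println(validDenebAttestationEpoch(0, 0))   // <nil>: no underflow at genesis
}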


@@ -197,7 +197,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
-500 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(200 * time.Millisecond),
},
wantedErr: "attestation epoch 8 not within current epoch 15 or previous epoch",
wantedErr: "attestation epoch 8 not within current epoch 15 or previous epoch 14",
},
{
name: "attestation.slot is well beyond current slot",
@@ -205,7 +205,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
attSlot: 1 << 32,
genesisTime: prysmTime.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
wantedErr: "attestation slot 4294967296 not within attestation propagation range of 0 to 15 (current slot)",
wantedErr: "which exceeds max allowed value relative to the local clock",
},
}
for _, tt := range tests {


@@ -140,7 +140,7 @@ func BeaconCommittee(
}
count := committeesPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
return ComputeCommittee(validatorIndices, seed, indexOffset, count)
return computeCommittee(validatorIndices, seed, indexOffset, count)
}
// CommitteeAssignmentContainer represents a committee list, committee index, and to be attested slot for a given epoch.
@@ -359,7 +359,7 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
if err != nil {
return err
}
proposerIndices, err := PrecomputeProposerIndices(state, indices, epoch)
proposerIndices, err := precomputeProposerIndices(state, indices, epoch)
if err != nil {
return err
}
@@ -409,7 +409,7 @@ func ClearCache() {
balanceCache.Clear()
}
// ComputeCommittee returns the requested shuffled committee out of the total committees using
// computeCommittee returns the requested shuffled committee out of the total committees using
// validator indices and seed.
//
// Spec pseudocode definition:
@@ -424,7 +424,7 @@ func ClearCache() {
// start = (len(indices) * index) // count
// end = (len(indices) * uint64(index + 1)) // count
// return [indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end)]
func ComputeCommittee(
func computeCommittee(
indices []primitives.ValidatorIndex,
seed [32]byte,
index, count uint64,
@@ -451,9 +451,9 @@ func ComputeCommittee(
return shuffledList[start:end], nil
}
// PrecomputeProposerIndices computes proposer indices of the current epoch and returns a list of proposer indices,
// This computes proposer indices of the current epoch and returns a list of proposer indices,
// the index of the list represents the slot number.
func PrecomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
hashFunc := hash.CustomSHA256Hasher()
proposerIndices := make([]primitives.ValidatorIndex, params.BeaconConfig().SlotsPerEpoch)


@@ -1,4 +1,4 @@
package helpers_test
package helpers
import (
"context"
@@ -7,7 +7,6 @@ import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -22,7 +21,7 @@ import (
)
func TestComputeCommittee_WithoutCache(t *testing.T) {
helpers.ClearCache()
ClearCache()
// Create 10 committees
committeeCount := uint64(10)
@@ -49,16 +48,16 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
require.NoError(t, err)
epoch := time.CurrentEpoch(state)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(t, err)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
committees, err := helpers.ComputeCommittee(indices, seed, 0, 1 /* Total committee*/)
committees, err := computeCommittee(indices, seed, 0, 1 /* Total committee*/)
assert.NoError(t, err, "Could not compute committee")
// Test shuffled indices are correct for index 5 committee
index := uint64(5)
committee5, err := helpers.ComputeCommittee(indices, seed, index, committeeCount)
committee5, err := computeCommittee(indices, seed, index, committeeCount)
assert.NoError(t, err, "Could not compute committee")
start := slice.SplitOffset(validatorCount, committeeCount, index)
end := slice.SplitOffset(validatorCount, committeeCount, index+1)
@@ -66,7 +65,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
// Test shuffled indices are correct for index 9 committee
index = uint64(9)
committee9, err := helpers.ComputeCommittee(indices, seed, index, committeeCount)
committee9, err := computeCommittee(indices, seed, index, committeeCount)
assert.NoError(t, err, "Could not compute committee")
start = slice.SplitOffset(validatorCount, committeeCount, index)
end = slice.SplitOffset(validatorCount, committeeCount, index+1)
@@ -74,42 +73,42 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
}
func TestComputeCommittee_RegressionTest(t *testing.T) {
helpers.ClearCache()
ClearCache()
indices := []primitives.ValidatorIndex{1, 3, 8, 16, 18, 19, 20, 23, 30, 35, 43, 46, 47, 54, 56, 58, 69, 70, 71, 83, 84, 85, 91, 96, 100, 103, 105, 106, 112, 121, 127, 128, 129, 140, 142, 144, 146, 147, 149, 152, 153, 154, 157, 160, 173, 175, 180, 182, 188, 189, 191, 194, 201, 204, 217, 221, 226, 228, 230, 231, 239, 241, 249, 250, 255}
seed := [32]byte{68, 110, 161, 250, 98, 230, 161, 172, 227, 226, 99, 11, 138, 124, 201, 134, 38, 197, 0, 120, 6, 165, 122, 34, 19, 216, 43, 226, 210, 114, 165, 183}
index := uint64(215)
count := uint64(32)
_, err := helpers.ComputeCommittee(indices, seed, index, count)
_, err := computeCommittee(indices, seed, index, count)
require.ErrorContains(t, "index out of range", err)
}
func TestVerifyBitfieldLength_OK(t *testing.T) {
helpers.ClearCache()
ClearCache()
bf := bitfield.Bitlist{0xFF, 0x01}
committeeSize := uint64(8)
assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
bf = bitfield.Bitlist{0xFF, 0x07}
committeeSize = 10
assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
}
func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
helpers.ClearCache()
ClearCache()
epoch := primitives.Epoch(1)
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 0, // Epoch 0.
})
require.NoError(t, err)
_, _, err = helpers.CommitteeAssignments(context.Background(), state, epoch+1)
_, _, err = CommitteeAssignments(context.Background(), state, epoch+1)
assert.ErrorContains(t, "can't be greater than next epoch", err)
}
func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
@@ -128,7 +127,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, 0)
_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, 0)
require.NoError(t, err, "Failed to determine CommitteeAssignments")
for _, ss := range proposerIndexToSlots {
for _, s := range ss {
@@ -199,9 +198,9 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
for i, tt := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
helpers.ClearCache()
ClearCache()
validatorIndexToCommittee, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
validatorIndexToCommittee, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
require.NoError(t, err, "Failed to determine CommitteeAssignments")
cac := validatorIndexToCommittee[tt.index]
assert.Equal(t, tt.committeeIndex, cac.CommitteeIndex, "Unexpected committeeIndex for validator index %d", tt.index)
@@ -216,7 +215,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
}
func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
helpers.ClearCache()
ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -238,17 +237,17 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, proposerIndxs, err := helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state))
_, proposerIndxs, err := CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state))
require.NoError(t, err)
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
_, proposerIndxs, err = helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
_, proposerIndxs, err = CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
require.NoError(t, err)
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
}
func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
helpers.ClearCache()
ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -264,12 +263,12 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, _, err = helpers.CommitteeAssignments(context.Background(), state, 0)
_, _, err = CommitteeAssignments(context.Background(), state, 0)
require.ErrorContains(t, "start slot 0 is smaller than the minimum valid start slot 1", err)
}
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
helpers.ClearCache()
ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -286,7 +285,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
})
require.NoError(t, err)
epoch := primitives.Epoch(1)
_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, epoch)
_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, epoch)
require.NoError(t, err, "Failed to determine CommitteeAssignments")
slotsWithProposers := make(map[primitives.Slot]bool)
@@ -392,10 +391,10 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
}
for i, tt := range tests {
helpers.ClearCache()
ClearCache()
require.NoError(t, state.SetSlot(tt.stateSlot))
err := helpers.VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
err := VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
if tt.verificationFailure {
assert.NotNil(t, err, "Verification succeeded when it was supposed to fail")
} else {
@@ -405,7 +404,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
}
func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
helpers.ClearCache()
ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
@@ -422,20 +421,20 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
require.NoError(t, UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
epoch := primitives.Epoch(0)
idx := primitives.CommitteeIndex(1)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
indices, err = helpers.CommitteeCache().Committee(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
indices, err = committeeCache.Committee(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(indices)), "Did not save correct indices lengths")
}
func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
helpers.ClearCache()
ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
@@ -453,19 +452,19 @@ func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
})
require.NoError(t, err)
e := time.CurrentEpoch(state)
require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e))
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e))
seed, err := helpers.Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, true, helpers.CommitteeCache().HasEntry(string(seed[:])))
require.Equal(t, true, committeeCache.HasEntry(string(seed[:])))
nextSeed, err := helpers.Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
nextSeed, err := Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, false, helpers.CommitteeCache().HasEntry(string(nextSeed[:])))
require.Equal(t, false, committeeCache.HasEntry(string(nextSeed[:])))
require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e+1))
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e+1))
require.Equal(t, true, helpers.CommitteeCache().HasEntry(string(nextSeed[:])))
require.Equal(t, true, committeeCache.HasEntry(string(nextSeed[:])))
}
func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
@@ -482,20 +481,20 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
index := uint64(3)
_, err = helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err = computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -516,20 +515,20 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
index := uint64(3)
_, err = helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err = computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -550,9 +549,9 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
i := uint64(0)
@@ -560,7 +559,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
i++
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -585,9 +584,9 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
i := uint64(0)
@@ -595,7 +594,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
i++
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -620,9 +619,9 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
i := uint64(0)
@@ -630,7 +629,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
i++
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -656,13 +655,13 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, err = helpers.BeaconCommitteeFromState(context.Background(), state, 1 /* previous epoch */, 0)
_, err = BeaconCommitteeFromState(context.Background(), state, 1 /* previous epoch */, 0)
require.NoError(t, err)
// Verify previous epoch is cached
seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
activeIndices, err := helpers.CommitteeCache().ActiveIndices(context.Background(), seed)
activeIndices, err := committeeCache.ActiveIndices(context.Background(), seed)
require.NoError(t, err)
assert.NotNil(t, activeIndices, "Did not cache active indices")
}
@@ -681,19 +680,19 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
})
require.NoError(t, err)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, 0)
indices, err := ActiveValidatorIndices(context.Background(), state, 0)
require.NoError(t, err)
proposerIndices, err := helpers.PrecomputeProposerIndices(state, indices, time.CurrentEpoch(state))
proposerIndices, err := precomputeProposerIndices(state, indices, time.CurrentEpoch(state))
require.NoError(t, err)
var wantedProposerIndices []primitives.ValidatorIndex
seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
require.NoError(t, err)
for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
seedWithSlotHash := hash.Hash(seedWithSlot)
index, err := helpers.ComputeProposerIndex(state, indices, seedWithSlotHash)
index, err := ComputeProposerIndex(state, indices, seedWithSlotHash)
require.NoError(t, err)
wantedProposerIndices = append(wantedProposerIndices, index)
}


@@ -1,17 +0,0 @@
//go:build fuzz
package helpers
import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
func CommitteeCache() *cache.FakeCommitteeCache {
return committeeCache
}
func SyncCommitteeCache() *cache.FakeSyncCommitteeCache {
return syncCommitteeCache
}
func ProposerIndicesCache() *cache.FakeProposerIndicesCache {
return proposerIndicesCache
}


@@ -1,17 +0,0 @@
//go:build !fuzz
package helpers
import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
func CommitteeCache() *cache.CommitteeCache {
return committeeCache
}
func SyncCommitteeCache() *cache.SyncCommitteeCache {
return syncCommitteeCache
}
func ProposerIndicesCache() *cache.ProposerIndicesCache {
return proposerIndicesCache
}
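
The two deleted files above were a matched pair selected by the fuzz build tag: identical accessor names, different concrete cache types, so tests compile against whichever set the active tags pick. A runnable sketch of that pattern; all identifiers here are hypothetical, and the fuzz-tagged twin file is described only in comments:

//go:build !fuzz

package main

import "fmt"

// realCache stands in for a concrete cache type; a twin file guarded by
// "//go:build fuzz" would declare committeeCacheAccessor with the same
// name but a fake cache return type.
type realCache struct{ entries int }

func committeeCacheAccessor() *realCache { return &realCache{} }

func main() {
	fmt.Println(committeeCacheAccessor().entries) // 0
}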


@@ -1,10 +1,9 @@
package helpers_test
package helpers
import (
"encoding/binary"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -41,10 +40,10 @@ func TestRandaoMix_OK(t *testing.T) {
},
}
for _, test := range tests {
helpers.ClearCache()
ClearCache()
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
mix, err := helpers.RandaoMix(state, test.epoch)
mix, err := RandaoMix(state, test.epoch)
require.NoError(t, err)
assert.DeepEqual(t, test.randaoMix, mix, "Incorrect randao mix")
}
@@ -77,10 +76,10 @@ func TestRandaoMix_CopyOK(t *testing.T) {
},
}
for _, test := range tests {
helpers.ClearCache()
ClearCache()
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
mix, err := helpers.RandaoMix(state, test.epoch)
mix, err := RandaoMix(state, test.epoch)
require.NoError(t, err)
uniqueNumber := uint64(params.BeaconConfig().EpochsPerHistoricalVector.Add(1000))
binary.LittleEndian.PutUint64(mix, uniqueNumber)
@@ -93,7 +92,7 @@ func TestRandaoMix_CopyOK(t *testing.T) {
}
func TestGenerateSeed_OK(t *testing.T) {
helpers.ClearCache()
ClearCache()
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := 0; i < len(randaoMixes); i++ {
@@ -108,7 +107,7 @@ func TestGenerateSeed_OK(t *testing.T) {
})
require.NoError(t, err)
got, err := helpers.Seed(state, 10, params.BeaconConfig().DomainBeaconAttester)
got, err := Seed(state, 10, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
wanted := [32]byte{102, 82, 23, 40, 226, 79, 171, 11, 203, 23, 175, 7, 88, 202, 80,


@@ -22,7 +22,7 @@ var balanceCache = cache.NewEffectiveBalanceCache()
// """
// Return the combined effective balance of the ``indices``.
// ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
// Math safe up to ~10B ETH, after which this overflows uint64.
// Math safe up to ~10B ETH, afterwhich this overflows uint64.
// """
// return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices])))
func TotalBalance(state state.ReadOnlyValidators, indices []primitives.ValidatorIndex) uint64 {
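
The spec formula quoted above floors the sum at EFFECTIVE_BALANCE_INCREMENT so later reward math never divides by zero. A minimal sketch, assuming mainnet's 10^9 Gwei increment:

package main

import "fmt"

const effectiveBalanceIncrement = 1_000_000_000 // 1 ETH in Gwei, assumed mainnet value

// totalBalance implements Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum)).
// Plain uint64 addition is safe up to roughly 10B ETH, as the comment
// above notes, after which the sum would overflow.
func totalBalance(effectiveBalances []uint64) uint64 {
	var sum uint64
	for _, b := range effectiveBalances {
		sum += b
	}
	if sum < effectiveBalanceIncrement {
		return effectiveBalanceIncrement
	}
	return sum
}

func main() {
	fmt.Println(totalBalance(nil))                        // 1000000000: the floor
	fmt.Println(totalBalance([]uint64{27e9, 28e9, 32e9})) // 87000000000
}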


@@ -1,10 +1,9 @@
package helpers_test
package helpers
import (
"math"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -15,7 +14,7 @@ import (
)
func TestTotalBalance_OK(t *testing.T) {
helpers.ClearCache()
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{
{EffectiveBalance: 27 * 1e9}, {EffectiveBalance: 28 * 1e9},
@@ -23,19 +22,19 @@ func TestTotalBalance_OK(t *testing.T) {
}})
require.NoError(t, err)
balance := helpers.TotalBalance(state, []primitives.ValidatorIndex{0, 1, 2, 3})
balance := TotalBalance(state, []primitives.ValidatorIndex{0, 1, 2, 3})
wanted := state.Validators()[0].EffectiveBalance + state.Validators()[1].EffectiveBalance +
state.Validators()[2].EffectiveBalance + state.Validators()[3].EffectiveBalance
assert.Equal(t, wanted, balance, "Incorrect TotalBalance")
}
func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) {
helpers.ClearCache()
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{}})
require.NoError(t, err)
balance := helpers.TotalBalance(state, []primitives.ValidatorIndex{})
balance := TotalBalance(state, []primitives.ValidatorIndex{})
wanted := params.BeaconConfig().EffectiveBalanceIncrement
assert.Equal(t, wanted, balance, "Incorrect TotalBalance")
}
@@ -52,7 +51,7 @@ func TestGetBalance_OK(t *testing.T) {
{i: 2, b: []uint64{0, 0, 0}},
}
for _, test := range tests {
helpers.ClearCache()
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Balances: test.b})
require.NoError(t, err)
@@ -69,7 +68,7 @@ func TestTotalActiveBalance(t *testing.T) {
{10000},
}
for _, test := range tests {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
@@ -77,7 +76,7 @@ func TestTotalActiveBalance(t *testing.T) {
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)
bal, err := helpers.TotalActiveBalance(state)
bal, err := TotalActiveBalance(state)
require.NoError(t, err)
require.Equal(t, uint64(test.vCount)*params.BeaconConfig().MaxEffectiveBalance, bal)
}
@@ -92,7 +91,7 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
{10000},
}
for _, test := range tests {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
@@ -100,7 +99,7 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)
bal, err := helpers.TotalActiveBalance(state)
bal, err := TotalActiveBalance(state)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, bal)
}
@@ -116,7 +115,7 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
{vCount: 10000, wantCount: 10000},
}
for _, test := range tests {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
@@ -124,7 +123,7 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)
bal, err := helpers.TotalActiveBalance(state)
bal, err := TotalActiveBalance(state)
require.NoError(t, err)
require.Equal(t, uint64(test.wantCount)*params.BeaconConfig().MaxEffectiveBalance, bal)
}
@@ -142,7 +141,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
{i: 2, b: []uint64{27 * 1e9, 28 * 1e9, 32 * 1e9}, nb: 33 * 1e9, eb: 65 * 1e9},
}
for _, test := range tests {
helpers.ClearCache()
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
@@ -150,7 +149,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
Balances: test.b,
})
require.NoError(t, err)
require.NoError(t, helpers.IncreaseBalance(state, test.i, test.nb))
require.NoError(t, IncreaseBalance(state, test.i, test.nb))
assert.Equal(t, test.eb, state.Balances()[test.i], "Incorrect Validator balance")
}
}
@@ -168,7 +167,7 @@ func TestDecreaseBalance_OK(t *testing.T) {
{i: 3, b: []uint64{27 * 1e9, 28 * 1e9, 1, 28 * 1e9}, nb: 28 * 1e9, eb: 0},
}
for _, test := range tests {
helpers.ClearCache()
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
@@ -176,13 +175,13 @@ func TestDecreaseBalance_OK(t *testing.T) {
Balances: test.b,
})
require.NoError(t, err)
require.NoError(t, helpers.DecreaseBalance(state, test.i, test.nb))
require.NoError(t, DecreaseBalance(state, test.i, test.nb))
assert.Equal(t, test.eb, state.Balances()[test.i], "Incorrect Validator balance")
}
}
func TestFinalityDelay(t *testing.T) {
helpers.ClearCache()
ClearCache()
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}
@@ -196,25 +195,25 @@ func TestFinalityDelay(t *testing.T) {
finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
}
setVal()
d := helpers.FinalityDelay(prevEpoch, finalizedEpoch)
d := FinalityDelay(prevEpoch, finalizedEpoch)
w := time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
assert.Equal(t, w, d, "Did not get wanted finality delay")
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 4}))
setVal()
d = helpers.FinalityDelay(prevEpoch, finalizedEpoch)
d = FinalityDelay(prevEpoch, finalizedEpoch)
w = time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
assert.Equal(t, w, d, "Did not get wanted finality delay")
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 5}))
setVal()
d = helpers.FinalityDelay(prevEpoch, finalizedEpoch)
d = FinalityDelay(prevEpoch, finalizedEpoch)
w = time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
assert.Equal(t, w, d, "Did not get wanted finality delay")
}
func TestIsInInactivityLeak(t *testing.T) {
helpers.ClearCache()
ClearCache()
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}
@@ -228,13 +227,13 @@ func TestIsInInactivityLeak(t *testing.T) {
finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
}
setVal()
assert.Equal(t, true, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
assert.Equal(t, true, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 4}))
setVal()
assert.Equal(t, true, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
assert.Equal(t, true, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 5}))
setVal()
assert.Equal(t, false, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
assert.Equal(t, false, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
}
func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
@@ -286,7 +285,7 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
{i: 2, b: []uint64{math.MaxUint64, math.MaxUint64, math.MaxUint64}, nb: 33 * 1e9},
}
for _, test := range tests {
helpers.ClearCache()
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
@@ -294,6 +293,6 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
Balances: test.b,
})
require.NoError(t, err)
require.ErrorContains(t, "addition overflows", helpers.IncreaseBalance(state, test.i, test.nb))
require.ErrorContains(t, "addition overflows", IncreaseBalance(state, test.i, test.nb))
}
}


@@ -26,7 +26,7 @@ var (
// 1. Checks if the public key exists in the sync committee cache
// 2. If 1 fails, checks if the public key exists in the input current sync committee object
func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.ValidatorIndex) (bool, error) {
root, err := SyncPeriodBoundaryRoot(st)
root, err := syncPeriodBoundaryRoot(st)
if err != nil {
return false, err
}
@@ -63,7 +63,7 @@ func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.Valida
func IsNextPeriodSyncCommittee(
st state.BeaconState, valIdx primitives.ValidatorIndex,
) (bool, error) {
root, err := SyncPeriodBoundaryRoot(st)
root, err := syncPeriodBoundaryRoot(st)
if err != nil {
return false, err
}
@@ -90,7 +90,7 @@ func IsNextPeriodSyncCommittee(
func CurrentPeriodSyncSubcommitteeIndices(
st state.BeaconState, valIdx primitives.ValidatorIndex,
) ([]primitives.CommitteeIndex, error) {
root, err := SyncPeriodBoundaryRoot(st)
root, err := syncPeriodBoundaryRoot(st)
if err != nil {
return nil, err
}
@@ -124,7 +124,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
func NextPeriodSyncSubcommitteeIndices(
st state.BeaconState, valIdx primitives.ValidatorIndex,
) ([]primitives.CommitteeIndex, error) {
root, err := SyncPeriodBoundaryRoot(st)
root, err := syncPeriodBoundaryRoot(st)
if err != nil {
return nil, err
}
@@ -182,10 +182,10 @@ func findSubCommitteeIndices(pubKey []byte, pubKeys [][]byte) []primitives.Commi
return indices
}
// SyncPeriodBoundaryRoot computes the current sync period boundary root by calculating sync period start epoch
// Retrieve the current sync period boundary root by calculating sync period start epoch
// and calling `BlockRoot`.
// It uses the boundary slot - 1 for block root. (Ex: SlotsPerEpoch * EpochsPerSyncCommitteePeriod - 1)
func SyncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([32]byte, error) {
func syncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([32]byte, error) {
// Can't call `BlockRoot` until the first slot.
if st.Slot() == params.BeaconConfig().GenesisSlot {
return params.BeaconConfig().ZeroHash, nil
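
As the comment above says, the helper anchors the period on the slot just before the period's first slot. A sketch of that slot arithmetic under assumed mainnet constants (32 slots per epoch, 256 epochs per sync committee period):

package main

import "fmt"

const (
	slotsPerEpoch                = 32
	epochsPerSyncCommitteePeriod = 256
)

// syncPeriodBoundarySlot returns the slot whose block root identifies the
// current sync committee period: one slot before the period's first slot,
// i.e. periodStartEpoch*slotsPerEpoch - 1.
func syncPeriodBoundarySlot(currentSlot uint64) uint64 {
	epoch := currentSlot / slotsPerEpoch
	periodStartEpoch := epoch - epoch%epochsPerSyncCommitteePeriod
	if periodStartEpoch == 0 {
		return 0 // genesis period has no prior slot; the real code special-cases this
	}
	return periodStartEpoch*slotsPerEpoch - 1
}

func main() {
	fmt.Println(syncPeriodBoundarySlot(300 * slotsPerEpoch)) // 8191: the slot before epoch 256
}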


@@ -1,4 +1,4 @@
package helpers_test
package helpers
import (
"math/rand"
@@ -7,7 +7,6 @@ import (
"time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -18,7 +17,7 @@ import (
)
func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -41,15 +40,15 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
r := [32]byte{'a'}
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 0)
ok, err := IsCurrentPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -71,13 +70,13 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 0)
ok, err := IsCurrentPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -99,13 +98,13 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 12390192)
ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
require.ErrorContains(t, "validator index 12390192 does not exist", err)
require.Equal(t, false, ok)
}
func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -128,15 +127,15 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
r := [32]byte{'a'}
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
ok, err := helpers.IsNextPeriodSyncCommittee(state, 0)
ok, err := IsNextPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -158,13 +157,13 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := helpers.IsNextPeriodSyncCommittee(state, 0)
ok, err := IsNextPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -186,13 +185,13 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := helpers.IsNextPeriodSyncCommittee(state, 120391029)
ok, err := IsNextPeriodSyncCommittee(state, 120391029)
require.ErrorContains(t, "validator index 120391029 does not exist", err)
require.Equal(t, false, ok)
}
func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -215,15 +214,15 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
r := [32]byte{'a'}
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 0)
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}
func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -244,27 +243,27 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
root, err := helpers.SyncPeriodBoundaryRoot(state)
root, err := syncPeriodBoundaryRoot(state)
require.NoError(t, err)
// Test that cache was empty.
_, err = helpers.SyncCommitteeCache().CurrentPeriodIndexPosition(root, 0)
_, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)
// Test that helper can retrieve the index given empty cache.
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 0)
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
// Test that cache was able to fill on miss.
time.Sleep(100 * time.Millisecond)
index, err = helpers.SyncCommitteeCache().CurrentPeriodIndexPosition(root, 0)
index, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -286,13 +285,13 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
require.ErrorContains(t, "validator index 129301923 does not exist", err)
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}
func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -315,15 +314,15 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
r := [32]byte{'a'}
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 0)
index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}
func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -345,13 +344,13 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 0)
index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}
func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -373,43 +372,43 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 21093019)
index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
require.ErrorContains(t, "validator index 21093019 does not exist", err)
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}
func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
helpers.ClearCache()
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 1,
})
require.NoError(t, err)
err = helpers.UpdateSyncCommitteeCache(state)
err = UpdateSyncCommitteeCache(state)
require.ErrorContains(t, "not at the end of the epoch to update cache", err)
state, err = state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: params.BeaconConfig().SlotsPerEpoch - 1,
})
require.NoError(t, err)
err = helpers.UpdateSyncCommitteeCache(state)
err = UpdateSyncCommitteeCache(state)
require.ErrorContains(t, "not at sync committee period boundary to update cache", err)
}
func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
helpers.ClearCache()
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch - 1,
LatestBlockHeader: &ethpb.BeaconBlockHeader{StateRoot: params.BeaconConfig().ZeroHash[:]},
})
require.NoError(t, err)
err = helpers.UpdateSyncCommitteeCache(state)
err = UpdateSyncCommitteeCache(state)
require.ErrorContains(t, "zero hash state root can't be used to update cache", err)
}
func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -436,7 +435,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
comIdxs, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 200)
comIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
require.NoError(t, err)
wantedSlot := params.BeaconConfig().EpochsPerSyncCommitteePeriod.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))
@@ -447,7 +446,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
syncCommittee.Pubkeys[i], syncCommittee.Pubkeys[j] = syncCommittee.Pubkeys[j], syncCommittee.Pubkeys[i]
})
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
newIdxs, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 200)
newIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
require.NoError(t, err)
require.DeepNotEqual(t, comIdxs, newIdxs)
}


@@ -1,4 +1,4 @@
package helpers_test
package helpers
import (
"context"
@@ -6,7 +6,6 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
@@ -33,7 +32,7 @@ func TestIsActiveValidator_OK(t *testing.T) {
}
for _, test := range tests {
validator := &ethpb.Validator{ActivationEpoch: 10, ExitEpoch: 100}
assert.Equal(t, test.b, helpers.IsActiveValidator(validator, test.a), "IsActiveValidator(%d)", test.a)
assert.Equal(t, test.b, IsActiveValidator(validator, test.a), "IsActiveValidator(%d)", test.a)
}
}
@@ -54,7 +53,7 @@ func TestIsActiveValidatorUsingTrie_OK(t *testing.T) {
for _, test := range tests {
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
require.NoError(t, err)
assert.Equal(t, test.b, helpers.IsActiveValidatorUsingTrie(readOnlyVal, test.a), "IsActiveValidatorUsingTrie(%d)", test.a)
assert.Equal(t, test.b, IsActiveValidatorUsingTrie(readOnlyVal, test.a), "IsActiveValidatorUsingTrie(%d)", test.a)
}
}
@@ -82,7 +81,7 @@ func TestIsActiveNonSlashedValidatorUsingTrie_OK(t *testing.T) {
require.NoError(t, err)
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
require.NoError(t, err)
assert.Equal(t, test.b, helpers.IsActiveNonSlashedValidatorUsingTrie(readOnlyVal, test.a), "IsActiveNonSlashedValidatorUsingTrie(%d)", test.a)
assert.Equal(t, test.b, IsActiveNonSlashedValidatorUsingTrie(readOnlyVal, test.a), "IsActiveNonSlashedValidatorUsingTrie(%d)", test.a)
}
}
@@ -162,7 +161,7 @@ func TestIsSlashableValidator_OK(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
t.Run("without trie", func(t *testing.T) {
slashableValidator := helpers.IsSlashableValidator(test.validator.ActivationEpoch,
slashableValidator := IsSlashableValidator(test.validator.ActivationEpoch,
test.validator.WithdrawableEpoch, test.validator.Slashed, test.epoch)
assert.Equal(t, test.slashable, slashableValidator, "Expected active validator slashable to be %t", test.slashable)
})
@@ -171,7 +170,7 @@ func TestIsSlashableValidator_OK(t *testing.T) {
require.NoError(t, err)
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
require.NoError(t, err)
slashableValidator := helpers.IsSlashableValidatorUsingTrie(readOnlyVal, test.epoch)
slashableValidator := IsSlashableValidatorUsingTrie(readOnlyVal, test.epoch)
assert.Equal(t, test.slashable, slashableValidator, "Expected active validator slashable to be %t", test.slashable)
})
})
@@ -224,17 +223,17 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
}
for _, tt := range tests {
helpers.ClearCache()
ClearCache()
require.NoError(t, state.SetSlot(tt.slot))
result, err := helpers.BeaconProposerIndex(context.Background(), state)
result, err := BeaconProposerIndex(context.Background(), state)
require.NoError(t, err, "Failed to get shard and committees at slot")
assert.Equal(t, tt.index, result, "Result index was an unexpected value")
}
}
func TestBeaconProposerIndex_BadState(t *testing.T) {
helpers.ClearCache()
ClearCache()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig()
@@ -262,12 +261,12 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
// Set a very high slot, so that retrieved block root will be
// nonexistent for the proposer cache.
require.NoError(t, state.SetSlot(100))
_, err = helpers.BeaconProposerIndex(context.Background(), state)
_, err = BeaconProposerIndex(context.Background(), state)
require.NoError(t, err)
}
func TestComputeProposerIndex_Compatibility(t *testing.T) {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
@@ -282,22 +281,22 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
})
require.NoError(t, err)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, 0)
indices, err := ActiveValidatorIndices(context.Background(), state, 0)
require.NoError(t, err)
var proposerIndices []primitives.ValidatorIndex
seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
require.NoError(t, err)
for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
seedWithSlotHash := hash.Hash(seedWithSlot)
index, err := helpers.ComputeProposerIndex(state, indices, seedWithSlotHash)
index, err := ComputeProposerIndex(state, indices, seedWithSlotHash)
require.NoError(t, err)
proposerIndices = append(proposerIndices, index)
}
var wantedProposerIndices []primitives.ValidatorIndex
seed, err = helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
seed, err = Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
require.NoError(t, err)
for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
@@ -310,15 +309,15 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
}
func TestDelayedActivationExitEpoch_OK(t *testing.T) {
helpers.ClearCache()
ClearCache()
epoch := primitives.Epoch(9999)
wanted := epoch + 1 + params.BeaconConfig().MaxSeedLookahead
assert.Equal(t, wanted, helpers.ActivationExitEpoch(epoch))
assert.Equal(t, wanted, ActivationExitEpoch(epoch))
}
func TestActiveValidatorCount_Genesis(t *testing.T) {
helpers.ClearCache()
ClearCache()
c := 1000
validators := make([]*ethpb.Validator, c)
@@ -335,10 +334,10 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {
require.NoError(t, err)
// Preset cache to a bad count.
seed, err := helpers.Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
seed, err := Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.NoError(t, helpers.CommitteeCache().AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}}))
validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
require.NoError(t, committeeCache.AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}}))
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
require.NoError(t, err)
assert.Equal(t, uint64(c), validatorCount, "Did not get the correct validator count")
}
@@ -354,7 +353,7 @@ func TestChurnLimit_OK(t *testing.T) {
{validatorCount: 2000000, wantedChurn: 30 /* validatorCount/churnLimitQuotient */},
}
for _, test := range tests {
helpers.ClearCache()
ClearCache()
validators := make([]*ethpb.Validator, test.validatorCount)
for i := 0; i < len(validators); i++ {
@@ -369,9 +368,9 @@ func TestChurnLimit_OK(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
require.NoError(t, err)
resultChurn := helpers.ValidatorActivationChurnLimit(validatorCount)
resultChurn := ValidatorActivationChurnLimit(validatorCount)
assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorActivationChurnLimit(%d)", test.validatorCount)
}
}
@@ -387,7 +386,7 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
{2000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
}
for _, test := range tests {
helpers.ClearCache()
ClearCache()
// Create validators
validators := make([]*ethpb.Validator, test.validatorCount)
@@ -406,11 +405,11 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
require.NoError(t, err)
// Get active validator count
validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
require.NoError(t, err)
// Test churn limit calculation
resultChurn := helpers.ValidatorActivationChurnLimitDeneb(validatorCount)
resultChurn := ValidatorActivationChurnLimitDeneb(validatorCount)
assert.Equal(t, test.wantedChurn, resultChurn)
}
}
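Deneb (EIP-7514) additionally caps activation churn from above, which is why the large validator counts in this table all expect the configured maximum: the limit becomes min(MaxPerEpochActivationChurnLimit, churnLimit). Roughly, building on the sketch above:
// activationChurnLimitDeneb clamps the phase0 churn limit at the
// EIP-7514 maximum (8 per epoch on mainnet).
func activationChurnLimitDeneb(activeCount uint64) uint64 {
	limit := churnLimit(activeCount)
	if maxChurn := params.BeaconConfig().MaxPerEpochActivationChurnLimit; limit > maxChurn {
		return maxChurn
	}
	return limit
}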
@@ -575,11 +574,11 @@ func TestActiveValidatorIndices(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
ClearCache()
s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
require.NoError(t, err)
got, err := helpers.ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
got, err := ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
return
@@ -685,12 +684,12 @@ func TestComputeProposerIndex(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
ClearCache()
bState := &ethpb.BeaconState{Validators: tt.args.validators}
stTrie, err := state_native.InitializeFromProtoUnsafePhase0(bState)
require.NoError(t, err)
got, err := helpers.ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
got, err := ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
return
@@ -719,9 +718,9 @@ func TestIsEligibleForActivationQueue(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
ClearCache()
assert.Equal(t, tt.want, helpers.IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
assert.Equal(t, tt.want, IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
})
}
}
@@ -748,11 +747,11 @@ func TestIsIsEligibleForActivation(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
ClearCache()
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
assert.Equal(t, tt.want, helpers.IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
assert.Equal(t, tt.want, IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
})
}
}
@@ -766,7 +765,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
hashFunc := hash.CustomSHA256Hasher()
for i := uint64(0); ; i++ {
candidateIndex, err := helpers.ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
candidateIndex, err := ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
if err != nil {
return 0, err
}
@@ -788,7 +787,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
}
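The loop above is the candidate-selection half of the spec's compute_proposer_index; the body (elided by the diff) accepts a shuffled candidate with probability effective_balance / MAX_EFFECTIVE_BALANCE, using one byte of hash(seed ++ i/32) as the random draw. The acceptance step, restated under those spec definitions rather than copied from this helper:
b := hashFunc(append(seed[:], bytesutil.Bytes8(i/32)...))
randomByte := b[i%32] // MAX_RANDOM_BYTE = 2**8 - 1 = 255
v := validators[activeIndices[candidateIndex]]
if v != nil && v.EffectiveBalance*255 >= params.BeaconConfig().MaxEffectiveBalance*uint64(randomByte) {
	return activeIndices[candidateIndex], nil
}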
func TestLastActivatedValidatorIndex_OK(t *testing.T) {
helpers.ClearCache()
ClearCache()
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
@@ -807,13 +806,13 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
require.NoError(t, beaconState.SetValidators(validators))
require.NoError(t, beaconState.SetBalances(balances))
index, err := helpers.LastActivatedValidatorIndex(context.Background(), beaconState)
index, err := LastActivatedValidatorIndex(context.Background(), beaconState)
require.NoError(t, err)
require.Equal(t, index, primitives.ValidatorIndex(3))
}
func TestProposerIndexFromCheckpoint(t *testing.T) {
helpers.ClearCache()
ClearCache()
e := primitives.Epoch(2)
r := [32]byte{'a'}
@@ -821,10 +820,10 @@ func TestProposerIndexFromCheckpoint(t *testing.T) {
ids := [32]primitives.ValidatorIndex{}
slot := primitives.Slot(69) // slot 5 in the Epoch
ids[5] = primitives.ValidatorIndex(19)
helpers.ProposerIndicesCache().Set(e, r, ids)
proposerIndicesCache.Set(e, r, ids)
c := &forkchoicetypes.Checkpoint{Root: root, Epoch: e - 1}
helpers.ProposerIndicesCache().SetCheckpoint(*c, r)
id, err := helpers.ProposerIndexAtSlotFromCheckpoint(c, slot)
proposerIndicesCache.SetCheckpoint(*c, r)
id, err := ProposerIndexAtSlotFromCheckpoint(c, slot)
require.NoError(t, err)
require.Equal(t, ids[5], id)
}

View File

@@ -59,7 +59,7 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch,
return ComputeDomainAndSignWithoutState(st.Fork(), epoch, domain, st.GenesisValidatorsRoot(), obj, key)
}
// ComputeDomainAndSignWithoutState offers the same functionality as ComputeDomainAndSign without the need to provide a BeaconState.
// ComputeDomainAndSignWithoutState offers the same functionalit as ComputeDomainAndSign without the need to provide a BeaconState.
// This is particularly helpful for signing values in tests.
func ComputeDomainAndSignWithoutState(fork *ethpb.Fork, epoch primitives.Epoch, domain [4]byte, vr []byte, obj fssz.HashRoot, key bls.SecretKey) ([]byte, error) {
// EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits.
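Per EIP-7044, the fork version used in the exit-signing domain is pinned to Capella from Deneb onward, so previously signed exits remain valid across future forks. A hedged sketch of that special case (field names per Prysm's params/ethpb; logic paraphrased from the spec, not copied from this function body):
if domain == params.BeaconConfig().DomainVoluntaryExit &&
	epoch >= params.BeaconConfig().DenebForkEpoch {
	fork = &ethpb.Fork{
		PreviousVersion: params.BeaconConfig().CapellaForkVersion,
		CurrentVersion:  params.BeaconConfig().CapellaForkVersion,
		Epoch:           params.BeaconConfig().CapellaForkEpoch,
	}
}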

View File

@@ -94,15 +94,6 @@ func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current pri
entry := s.cache.ensure(key)
defer s.cache.delete(key)
root := b.Root()
sumz, err := s.store.WaitForSummarizer(ctx)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", b.Root())).
WithError(err).
Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
} else {
entry.setDiskSummary(sumz.Summary(root))
}
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.
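The fail-fast check the comment describes boils down to: for every KZG commitment in the block, a matching cached sidecar must exist. A minimal sketch of that comparison (checkCommitments is a hypothetical helper, not the actual Prysm function):
// checkCommitments returns an error on the first missing or
// mismatched sidecar, without attempting to salvage the batch.
func checkCommitments(commits [][]byte, scs []*blocks.ROBlob) error {
	for i, c := range commits {
		if i >= len(scs) || scs[i] == nil {
			return fmt.Errorf("missing sidecar for commitment %d", i)
		}
		if !bytes.Equal(c, scs[i].KzgCommitment) {
			return fmt.Errorf("commitment mismatch at index %d", i)
		}
	}
	return nil
}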

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -60,12 +59,7 @@ func (c *cache) delete(key cacheKey) {
// cacheEntry holds a fixed-length cache of BlobSidecars.
type cacheEntry struct {
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
diskSummary filesystem.BlobStorageSummary
}
func (e *cacheEntry) setDiskSummary(sum filesystem.BlobStorageSummary) {
e.diskSummary = sum
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
}
// stash adds an item to the in-memory cache of BlobSidecars.
@@ -87,17 +81,9 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
// the cache do not match those found in the block. If err is nil, then all expected
// commitments were found in the cache and the sidecar slice return value can be used
// to perform a DA check against the cached sidecars.
// filter only returns blobs that need to be checked. Blobs already available on disk will be excluded.
func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROBlob, error) {
if e.diskSummary.AllAvailable(kc.count()) {
return nil, nil
}
scs := make([]blocks.ROBlob, 0, kc.count())
scs := make([]blocks.ROBlob, kc.count())
for i := uint64(0); i < fieldparams.MaxBlobsPerBlock; i++ {
// We already have this blob, we don't need to write it or validate it.
if e.diskSummary.HasIndex(i) {
continue
}
if kc[i] == nil {
if e.scs[i] != nil {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
@@ -111,7 +97,7 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
if !bytes.Equal(kc[i], e.scs[i].KzgCommitment) {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitment, kc[i])
}
scs = append(scs, *e.scs[i])
scs[i] = *e.scs[i]
}
return scs, nil

View File

@@ -3,14 +3,9 @@ package das
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func TestCacheEnsureDelete(t *testing.T) {
@@ -28,145 +23,3 @@ func TestCacheEnsureDelete(t *testing.T) {
var nilEntry *cacheEntry
require.Equal(t, nilEntry, c.entries[k])
}
type filterTestCaseSetupFunc func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpected int) filterTestCaseSetupFunc {
return func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, nBlobs)
commits, err := commitmentsToCheck(blk, blk.Block().Slot())
require.NoError(t, err)
entry := &cacheEntry{}
if len(onDisk) > 0 {
od := map[[32]byte][]int{blk.Root(): onDisk}
sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
sum := sumz.Summary(blk.Root())
entry.setDiskSummary(sum)
}
expected := make([]blocks.ROBlob, 0, nBlobs)
for i := 0; i < commits.count(); i++ {
if entry.diskSummary.HasIndex(uint64(i)) {
continue
}
// If we aren't telling the cache a blob is on disk, add it to the expected list and stash.
expected = append(expected, blobs[i])
require.NoError(t, entry.stash(&blobs[i]))
}
require.Equal(t, numExpected, len(expected))
return entry, commits, expected
}
}
func TestFilterDiskSummary(t *testing.T) {
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
cases := []struct {
name string
setup filterTestCaseSetupFunc
}{
{
name: "full blobs, all on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{0, 1, 2, 3, 4, 5}, 0),
},
{
name: "full blobs, first on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{0}, 5),
},
{
name: "full blobs, middle on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{2}, 5),
},
{
name: "full blobs, last on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{5}, 5),
},
{
name: "full blobs, none on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{}, 6),
},
{
name: "one commitment, on disk",
setup: filterTestCaseSetup(denebSlot, 1, []int{0}, 0),
},
{
name: "one commitment, not on disk",
setup: filterTestCaseSetup(denebSlot, 1, []int{}, 1),
},
{
name: "two commitments, first on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{0}, 1),
},
{
name: "two commitments, last on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{1}, 1),
},
{
name: "two commitments, none on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{}, 2),
},
{
name: "two commitments, all on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{0, 1}, 0),
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
entry, commits, expected := c.setup(t)
// first (root) argument doesn't matter, it is just for logs
got, err := entry.filter([32]byte{}, commits)
require.NoError(t, err)
require.Equal(t, len(expected), len(got))
})
}
}
func TestFilter(t *testing.T) {
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
cases := []struct {
name string
setup func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
err error
}{
{
name: "commitments mismatch - extra sidecar",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
commits[5] = nil
return entry, commits, expected
},
err: errCommitmentMismatch,
},
{
name: "sidecar missing",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
entry.scs[5] = nil
return entry, commits, expected
},
err: errMissingSidecar,
},
{
name: "commitments mismatch - different bytes",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
entry.scs[5].KzgCommitment = []byte("nope")
return entry, commits, expected
},
err: errCommitmentMismatch,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
entry, commits, expected := c.setup(t)
// first (root) argument doesn't matter, it is just for logs
got, err := entry.filter([32]byte{}, commits)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, len(expected), len(got))
})
}
}

View File

@@ -4,10 +4,9 @@ go_library(
name = "go_default_library",
srcs = [
"blob.go",
"cache.go",
"ephemeral.go",
"log.go",
"metrics.go",
"mock.go",
"pruner.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
@@ -34,7 +33,6 @@ go_test(
name = "go_default_test",
srcs = [
"blob_test.go",
"cache_test.go",
"pruner_test.go",
],
embed = [":go_default_library"],

View File

@@ -1,9 +1,7 @@
package filesystem
import (
"context"
"fmt"
"math"
"os"
"path"
"strconv"
@@ -105,29 +103,12 @@ func (bs *BlobStorage) WarmCache() {
return
}
go func() {
start := time.Now()
if err := bs.pruner.warmCache(); err != nil {
if err := bs.pruner.prune(0); err != nil {
log.WithError(err).Error("Error encountered while warming up blob pruner cache")
}
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
}()
}
// ErrBlobStorageSummarizerUnavailable is a sentinel error returned when there is no pruner/cache available.
// This should be used by code that optionally uses the summarizer to optimize rpc requests. Being able to
// fallback when there is no summarizer allows client code to avoid test complexity where the summarizer doesn't matter.
var ErrBlobStorageSummarizerUnavailable = errors.New("BlobStorage not initialized with a pruner or cache")
// WaitForSummarizer blocks until the BlobStorageSummarizer is ready to use.
// BlobStorageSummarizer is not ready immediately on node startup because it needs to sample the blob filesystem to
// determine which blobs are available.
func (bs *BlobStorage) WaitForSummarizer(ctx context.Context) (BlobStorageSummarizer, error) {
if bs == nil || bs.pruner == nil {
return nil, ErrBlobStorageSummarizerUnavailable
}
return bs.pruner.waitForCache(ctx)
}
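The intended fallback pattern for ErrBlobStorageSummarizerUnavailable is to degrade gracefully rather than fail the request; at a call site that looks roughly like the following, mirroring the IsDataAvailable change earlier in this diff:
sumz, err := bs.WaitForSummarizer(ctx)
if err != nil {
	// No summarizer (e.g. in tests): proceed without the disk summary.
	log.WithError(err).Debug("BlobStorageSummarizer unavailable; skipping disk lookup")
} else {
	entry.setDiskSummary(sumz.Summary(root))
}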
// Save saves blobs given a list of sidecars.
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
startTime := time.Now()
@@ -300,15 +281,6 @@ func (bs *BlobStorage) Clear() error {
return nil
}
// WithinRetentionPeriod checks if the requested epoch is within the blob retention period.
func (bs *BlobStorage) WithinRetentionPeriod(requested, current primitives.Epoch) bool {
if requested > math.MaxUint64-bs.retentionEpochs {
// If there is an overflow, then the retention period was set to an extremely large number.
return true
}
return requested+bs.retentionEpochs >= current
}
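The guard in WithinRetentionPeriod exists because requested + retentionEpochs is uint64 arithmetic: with retentionEpochs = 16, requested 0 is retained for any current ≤ 16, but with retentionEpochs = math.MaxUint64 the sum would wrap, so the method short-circuits to true (retention is effectively unbounded). The same logic over plain integers:
func withinRetention(requested, current, retention uint64) bool {
	if requested > math.MaxUint64-retention {
		return true // requested+retention would overflow uint64
	}
	return requested+retention >= current
}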
type blobNamer struct {
root [32]byte
index uint64

View File

@@ -2,7 +2,6 @@ package filesystem
import (
"bytes"
"math"
"os"
"path"
"sync"
@@ -25,7 +24,8 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
require.NoError(t, err)
t.Run("no error for duplicate", func(t *testing.T) {
fs, bs := NewEphemeralBlobStorageWithFs(t)
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
existingSidecar := testSidecars[0]
blobPath := namerForSidecar(existingSidecar).path()
@@ -129,7 +129,8 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
// pollUntil polls a condition function until it returns true or a timeout is reached.
func TestBlobIndicesBounds(t *testing.T) {
fs, bs := NewEphemeralBlobStorageWithFs(t)
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
root := [32]byte{}
okIdx := uint64(fieldparams.MaxBlobsPerBlock - 1)
@@ -160,7 +161,8 @@ func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, idx uint64) {
func TestBlobStoragePrune(t *testing.T) {
currentSlot := primitives.Slot(200000)
fs, bs := NewEphemeralBlobStorageWithFs(t)
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
t.Run("PruneOne", func(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 300, fieldparams.MaxBlobsPerBlock)
@@ -216,7 +218,8 @@ func TestBlobStoragePrune(t *testing.T) {
func BenchmarkPruning(b *testing.B) {
var t *testing.T
_, bs := NewEphemeralBlobStorageWithFs(t)
_, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
blockQty := 10000
currentSlot := primitives.Slot(150000)
@@ -245,50 +248,3 @@ func TestNewBlobStorage(t *testing.T) {
_, err = NewBlobStorage(WithBasePath(path.Join(t.TempDir(), "good")))
require.NoError(t, err)
}
func TestConfig_WithinRetentionPeriod(t *testing.T) {
retention := primitives.Epoch(16)
storage := &BlobStorage{retentionEpochs: retention}
cases := []struct {
name string
requested primitives.Epoch
current primitives.Epoch
within bool
}{
{
name: "before",
requested: 0,
current: retention + 1,
within: false,
},
{
name: "same",
requested: 0,
current: 0,
within: true,
},
{
name: "boundary",
requested: 0,
current: retention,
within: true,
},
{
name: "one less",
requested: retention - 1,
current: retention,
within: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
require.Equal(t, c.within, storage.WithinRetentionPeriod(c.requested, c.current))
})
}
t.Run("overflow", func(t *testing.T) {
storage := &BlobStorage{retentionEpochs: math.MaxUint64}
require.Equal(t, true, storage.WithinRetentionPeriod(1, 1))
})
}

View File

@@ -1,119 +0,0 @@
package filesystem
import (
"sync"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool
// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
slot primitives.Slot
mask blobIndexMask
}
// HasIndex returns true if the BlobSidecar at the given index is available in the filesystem.
func (s BlobStorageSummary) HasIndex(idx uint64) bool {
// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
if idx >= fieldparams.MaxBlobsPerBlock {
return false
}
return s.mask[idx]
}
// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
func (s BlobStorageSummary) AllAvailable(count int) bool {
if count > fieldparams.MaxBlobsPerBlock {
return false
}
for i := 0; i < count; i++ {
if !s.mask[i] {
return false
}
}
return true
}
// BlobStorageSummarizer can be used to receive a summary of metadata about blobs on disk for a given root.
// The BlobStorageSummary can be used to check which indices (if any) are available for a given block by root.
type BlobStorageSummarizer interface {
Summary(root [32]byte) BlobStorageSummary
}
type blobStorageCache struct {
mu sync.RWMutex
nBlobs float64
cache map[string]BlobStorageSummary
}
var _ BlobStorageSummarizer = &blobStorageCache{}
func newBlobStorageCache() *blobStorageCache {
return &blobStorageCache{
cache: make(map[string]BlobStorageSummary, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
}
}
// Summary returns the BlobStorageSummary for `root`. The BlobStorageSummary can be used to check for the presence of
// BlobSidecars based on Index.
func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
k := rootString(root)
s.mu.RLock()
defer s.mu.RUnlock()
return s.cache[k]
}
func (s *blobStorageCache) ensure(key string, slot primitives.Slot, idx uint64) error {
if idx >= fieldparams.MaxBlobsPerBlock {
return errIndexOutOfBounds
}
s.mu.Lock()
defer s.mu.Unlock()
v := s.cache[key]
v.slot = slot
if !v.mask[idx] {
s.updateMetrics(1)
}
v.mask[idx] = true
s.cache[key] = v
return nil
}
func (s *blobStorageCache) slot(key string) (primitives.Slot, bool) {
s.mu.RLock()
defer s.mu.RUnlock()
v, ok := s.cache[key]
if !ok {
return 0, false
}
return v.slot, ok
}
func (s *blobStorageCache) evict(key string) {
var deleted float64
s.mu.Lock()
v, ok := s.cache[key]
if ok {
for i := range v.mask {
if v.mask[i] {
deleted += 1
}
}
}
delete(s.cache, key)
s.mu.Unlock()
if deleted > 0 {
s.updateMetrics(-deleted)
}
}
func (s *blobStorageCache) updateMetrics(delta float64) {
s.nBlobs += delta
blobDiskCount.Set(s.nBlobs)
blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
}

View File

@@ -1,150 +0,0 @@
package filesystem
import (
"testing"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestSlotByRoot_Summary(t *testing.T) {
var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
firstSet[0] = true
lastSet[len(lastSet)-1] = true
oneSet[1] = true
for i := range allSet {
allSet[i] = true
}
cases := []struct {
name string
root [32]byte
expected *blobIndexMask
}{
{
name: "not found",
},
{
name: "none set",
expected: &noneSet,
},
{
name: "index 1 set",
expected: &oneSet,
},
{
name: "all set",
expected: &allSet,
},
{
name: "first set",
expected: &firstSet,
},
{
name: "last set",
expected: &lastSet,
},
}
sc := newBlobStorageCache()
for _, c := range cases {
if c.expected != nil {
key := rootString(bytesutil.ToBytes32([]byte(c.name)))
sc.cache[key] = BlobStorageSummary{slot: 0, mask: *c.expected}
}
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
key := bytesutil.ToBytes32([]byte(c.name))
sum := sc.Summary(key)
for i := range c.expected {
ui := uint64(i)
if c.expected == nil {
require.Equal(t, false, sum.HasIndex(ui))
} else {
require.Equal(t, c.expected[i], sum.HasIndex(ui))
}
}
})
}
}
func TestAllAvailable(t *testing.T) {
idxUpTo := func(u int) []int {
r := make([]int, u)
for i := range r {
r[i] = i
}
return r
}
require.DeepEqual(t, []int{}, idxUpTo(0))
require.DeepEqual(t, []int{0}, idxUpTo(1))
require.DeepEqual(t, []int{0, 1, 2, 3, 4, 5}, idxUpTo(6))
cases := []struct {
name string
idxSet []int
count int
aa bool
}{
{
// If there are no blobs committed, then all the committed blobs are available.
name: "none in idx, 0 arg",
count: 0,
aa: true,
},
{
name: "none in idx, 1 arg",
count: 1,
aa: false,
},
{
name: "first in idx, 1 arg",
idxSet: []int{0},
count: 1,
aa: true,
},
{
name: "second in idx, 1 arg",
idxSet: []int{1},
count: 1,
aa: false,
},
{
name: "first missing, 2 arg",
idxSet: []int{1},
count: 2,
aa: false,
},
{
name: "all missing, 1 arg",
count: 6,
aa: false,
},
{
name: "out of bound is safe",
count: fieldparams.MaxBlobsPerBlock + 1,
aa: false,
},
{
name: "max present",
count: fieldparams.MaxBlobsPerBlock,
idxSet: idxUpTo(fieldparams.MaxBlobsPerBlock),
aa: true,
},
{
name: "one present",
count: 1,
idxSet: idxUpTo(1),
aa: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var mask blobIndexMask
for _, idx := range c.idxSet {
mask[idx] = true
}
sum := BlobStorageSummary{mask: mask}
require.Equal(t, c.aa, sum.AllAvailable(c.count))
})
}
}

View File

@@ -12,7 +12,7 @@ import (
// improving test performance and simplifying cleanup.
func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
fs := afero.NewMemMapFs()
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
if err != nil {
t.Fatal("test setup issue", err)
}
@@ -21,13 +21,13 @@ func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the BlobStorage api.
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage) {
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage, error) {
fs := afero.NewMemMapFs()
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
if err != nil {
t.Fatal("test setup issue", err)
}
return fs, &BlobStorage{fs: fs, pruner: pruner}
return fs, &BlobStorage{fs: fs, pruner: pruner}, nil
}
type BlobMocker struct {
@@ -61,15 +61,3 @@ func NewEphemeralBlobStorageWithMocker(_ testing.TB) (*BlobMocker, *BlobStorage)
bs := &BlobStorage{fs: fs}
return &BlobMocker{fs: fs, bs: bs}, bs
}
func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
c := newBlobStorageCache()
for k, v := range set {
for i := range v {
if err := c.ensure(rootString(k), 0, uint64(v[i])); err != nil {
t.Fatal(err)
}
}
}
return c
}

View File

@@ -1,7 +1,6 @@
package filesystem
import (
"context"
"encoding/binary"
"io"
"path"
@@ -13,6 +12,7 @@ import (
"time"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -32,39 +32,22 @@ type blobPruner struct {
sync.Mutex
prunedBefore atomic.Uint64
windowSize primitives.Slot
cache *blobStorageCache
cacheReady chan struct{}
warmed bool
slotMap *slotForRoot
fs afero.Fs
}
type prunerOpt func(*blobPruner) error
func withWarmedCache() prunerOpt {
return func(p *blobPruner) error {
return p.warmCache()
}
}
func newBlobPruner(fs afero.Fs, retain primitives.Epoch, opts ...prunerOpt) (*blobPruner, error) {
func newBlobPruner(fs afero.Fs, retain primitives.Epoch) (*blobPruner, error) {
r, err := slots.EpochStart(retain + retentionBuffer)
if err != nil {
return nil, errors.Wrap(err, "could not set retentionSlots")
}
cw := make(chan struct{})
p := &blobPruner{fs: fs, windowSize: r, cache: newBlobStorageCache(), cacheReady: cw}
for _, o := range opts {
if err := o(p); err != nil {
return nil, err
}
}
return p, nil
return &blobPruner{fs: fs, windowSize: r, slotMap: newSlotForRoot()}, nil
}
// notify updates the pruner's view of root->blob mappings. This allows the pruner to build a cache
// of root->slot mappings and decide when to evict old blobs based on the age of present blobs.
func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) error {
if err := p.cache.ensure(rootString(root), latest, idx); err != nil {
if err := p.slotMap.ensure(rootString(root), latest, idx); err != nil {
return err
}
pruned := uint64(windowMin(latest, p.windowSize))
@@ -72,8 +55,6 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
return nil
}
go func() {
p.Lock()
defer p.Unlock()
if err := p.prune(primitives.Slot(pruned)); err != nil {
log.WithError(err).Errorf("Failed to prune blobs from slot %d", latest)
}
@@ -81,7 +62,7 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
return nil
}
func windowMin(latest, offset primitives.Slot) primitives.Slot {
func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
// Safely compute the first slot in the epoch for the latest slot
latest = latest - latest%params.BeaconConfig().SlotsPerEpoch
if latest < offset {
@@ -90,32 +71,12 @@ func windowMin(latest, offset primitives.Slot) primitives.Slot {
return latest - offset
}
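windowMin snaps latest down to the first slot of its epoch before subtracting the window, clamping at zero; e.g. with 32 slots per epoch, windowMin(69, 128) = 0 (since 64 < 128), while windowMin(4096, 128) = 3968. Restated over bare integers under that assumption:
// windowMin with SlotsPerEpoch fixed at 32 for illustration.
func windowMin(latest, offset uint64) uint64 {
	latest -= latest % 32 // first slot of latest's epoch
	if latest < offset {
		return 0
	}
	return latest - offset
}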
func (p *blobPruner) warmCache() error {
p.Lock()
defer p.Unlock()
if err := p.prune(0); err != nil {
return err
}
if !p.warmed {
p.warmed = true
close(p.cacheReady)
}
return nil
}
func (p *blobPruner) waitForCache(ctx context.Context) (*blobStorageCache, error) {
select {
case <-p.cacheReady:
return p.cache, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
// Prune prunes blobs in the base directory based on the retention epoch.
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
p.Lock()
defer p.Unlock()
start := time.Now()
totalPruned, totalErr := 0, 0
// Customize logging/metrics behavior for the initial cache warmup when slot=0.
@@ -161,7 +122,7 @@ func shouldRetain(slot, pruneBefore primitives.Slot) bool {
func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) {
root := rootFromDir(dir)
slot, slotCached := p.cache.slot(root)
slot, slotCached := p.slotMap.slot(root)
// Return early if the slot is cached and doesn't need pruning.
if slotCached && shouldRetain(slot, pruneBefore) {
return 0, nil
@@ -190,7 +151,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
if err != nil {
return 0, errors.Wrapf(err, "index could not be determined for blob file %s", scFiles[i])
}
if err := p.cache.ensure(root, slot, idx); err != nil {
if err := p.slotMap.ensure(root, slot, idx); err != nil {
return 0, errors.Wrapf(err, "could not update prune cache for blob file %s", scFiles[i])
}
}
@@ -218,7 +179,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
return removed, errors.Wrapf(err, "unable to remove blob directory %s", dir)
}
p.cache.evict(rootFromDir(dir))
p.slotMap.evict(rootFromDir(dir))
return len(scFiles), nil
}
@@ -308,3 +269,71 @@ func filterSsz(s string) bool {
func filterPart(s string) bool {
return filepath.Ext(s) == dotPartExt
}
func newSlotForRoot() *slotForRoot {
return &slotForRoot{
cache: make(map[string]*slotCacheEntry, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
}
}
type slotCacheEntry struct {
slot primitives.Slot
mask [fieldparams.MaxBlobsPerBlock]bool
}
type slotForRoot struct {
sync.RWMutex
nBlobs float64
cache map[string]*slotCacheEntry
}
func (s *slotForRoot) updateMetrics(delta float64) {
s.nBlobs += delta
blobDiskCount.Set(s.nBlobs)
blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
}
func (s *slotForRoot) ensure(key string, slot primitives.Slot, idx uint64) error {
if idx >= fieldparams.MaxBlobsPerBlock {
return errIndexOutOfBounds
}
s.Lock()
defer s.Unlock()
v, ok := s.cache[key]
if !ok {
v = &slotCacheEntry{}
}
v.slot = slot
if !v.mask[idx] {
s.updateMetrics(1)
}
v.mask[idx] = true
s.cache[key] = v
return nil
}
func (s *slotForRoot) slot(key string) (primitives.Slot, bool) {
s.RLock()
defer s.RUnlock()
v, ok := s.cache[key]
if !ok {
return 0, false
}
return v.slot, ok
}
func (s *slotForRoot) evict(key string) {
s.Lock()
defer s.Unlock()
v, ok := s.cache[key]
var deleted float64
if ok {
for i := range v.mask {
if v.mask[i] {
deleted += 1
}
}
s.updateMetrics(-deleted)
}
delete(s.cache, key)
}

View File

@@ -28,7 +28,7 @@ func TestTryPruneDir_CachedNotExpired(t *testing.T) {
root := fmt.Sprintf("%#x", sc.BlockRoot())
// This slot is right on the edge of what would need to be pruned, so by adding it to the cache and
// skipping any other test setup, we can be certain the hot cache path never touches the filesystem.
require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
pruned, err := pr.tryPruneDir(root, pr.windowSize)
require.NoError(t, err)
require.Equal(t, 0, pruned)
@@ -45,13 +45,14 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
require.NoError(t, err)
root := fmt.Sprintf("%#x", sc.BlockRoot())
require.NoError(t, fs.Mkdir(root, directoryPermissions)) // make empty directory
require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
pruned, err := pr.tryPruneDir(root, slot+1)
require.NoError(t, err)
require.Equal(t, 0, pruned)
})
t.Run("blobs to delete", func(t *testing.T) {
fs, bs := NewEphemeralBlobStorageWithFs(t)
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
var slot primitives.Slot = 0
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
@@ -62,7 +63,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
// check that the root->slot is cached
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
cs, cok := bs.pruner.cache.slot(root)
cs, cok := bs.pruner.slotMap.slot(root)
require.Equal(t, true, cok)
require.Equal(t, slot, cs)
@@ -82,7 +83,8 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
func TestTryPruneDir_SlotFromFile(t *testing.T) {
t.Run("expired blobs deleted", func(t *testing.T) {
fs, bs := NewEphemeralBlobStorageWithFs(t)
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
var slot primitives.Slot = 0
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
@@ -93,12 +95,12 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
// check that the root->slot is cached
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
cs, ok := bs.pruner.cache.slot(root)
cs, ok := bs.pruner.slotMap.slot(root)
require.Equal(t, true, ok)
require.Equal(t, slot, cs)
// evict it from the cache so that we trigger the file read path
bs.pruner.cache.evict(root)
_, ok = bs.pruner.cache.slot(root)
bs.pruner.slotMap.evict(root)
_, ok = bs.pruner.slotMap.slot(root)
require.Equal(t, false, ok)
// ensure that we see the saved files in the filesystem
@@ -114,9 +116,10 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
require.Equal(t, 0, len(files))
})
t.Run("not expired, intact", func(t *testing.T) {
fs, bs := NewEphemeralBlobStorageWithFs(t)
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
// Set slot equal to the window size, so it should be retained.
slot := bs.pruner.windowSize
var slot primitives.Slot = bs.pruner.windowSize
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
@@ -126,8 +129,8 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
// Evict slot mapping from the cache so that we trigger the file read path.
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
bs.pruner.cache.evict(root)
_, ok := bs.pruner.cache.slot(root)
bs.pruner.slotMap.evict(root)
_, ok := bs.pruner.slotMap.slot(root)
require.Equal(t, false, ok)
// Ensure that we see the saved files in the filesystem.
@@ -181,7 +184,8 @@ func TestSlotFromFile(t *testing.T) {
}
for _, c := range cases {
t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
fs, bs := NewEphemeralBlobStorageWithFs(t)
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
sc, err := verification.BlobSidecarNoop(sidecars[0])
require.NoError(t, err)
@@ -239,8 +243,10 @@ func TestListDir(t *testing.T) {
}
blobWithSszAndTmp := dirFiles{name: "0x1234567890", isDir: true,
children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}}
fsLayout.children = append(fsLayout.children,
notABlob, childlessBlob, blobWithSsz, blobWithSszAndTmp)
fsLayout.children = append(fsLayout.children, notABlob)
fsLayout.children = append(fsLayout.children, childlessBlob)
fsLayout.children = append(fsLayout.children, blobWithSsz)
fsLayout.children = append(fsLayout.children, blobWithSszAndTmp)
topChildren := make([]string, len(fsLayout.children))
for i := range fsLayout.children {
@@ -276,7 +282,10 @@ func TestListDir(t *testing.T) {
dirPath: ".",
expected: []string{notABlob.name},
filter: func(s string) bool {
return s == notABlob.name
if s == notABlob.name {
return true
}
return false
},
},
{

View File

@@ -118,7 +118,7 @@ type HeadAccessDatabase interface {
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
type SlasherDatabase interface {
io.Closer
SaveLastEpochWrittenForValidators(
SaveLastEpochsWrittenForValidators(
ctx context.Context, epochByValidator map[primitives.ValidatorIndex]primitives.Epoch,
) error
SaveAttestationRecordsForValidators(

View File

@@ -20,7 +20,6 @@ go_library(
"migration.go",
"migration_archived_index.go",
"migration_block_slot_index.go",
"migration_finalized_parent.go",
"migration_state_validators.go",
"schema.go",
"state.go",

View File

@@ -201,19 +201,20 @@ func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBl
return err
}
encs[i-1] = penc
}
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
lastIdx := len(blocks) - 1
fbrs[lastIdx].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[lastIdx])
if err != nil {
tracing.AnnotateError(span, err)
return err
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
if i == len(blocks)-1 {
fbrs[i].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[i])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[i] = enc
}
}
encs[lastIdx] = enc
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)

View File

@@ -237,50 +237,6 @@ func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte)
return ifaceBlocks
}
func TestStore_BackfillFinalizedIndexSingle(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
// we're making 4 blocks so we can test an element without a valid child at the end
blks, err := consensusblocks.NewROBlockSlice(makeBlocks(t, 0, 4, [32]byte{}))
require.NoError(t, err)
// existing is the child that we'll set up in the index by hand to seed the index.
existing := blks[3]
// toUpdate is a single item update, emulating a backfill batch size of 1. it is the parent of `existing`.
toUpdate := blks[2]
// set up existing finalized block
ebpr := existing.Block().ParentRoot()
ebr := existing.Root()
ebf := &ethpb.FinalizedBlockRootContainer{
ParentRoot: ebpr[:],
ChildRoot: make([]byte, 32), // we're bypassing validation to seed the db, so we don't need a valid child.
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
// writing this to the index outside of the validating function to seed the test.
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{toUpdate}, ebr))
// make sure that we still correctly validate descendents in the single item case.
noChild := blks[0] // will fail to update because we don't have blks[1] in the db.
// test wrong child param
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, ebr), errNotConnectedToFinalized)
// test parent of child that isn't finalized
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, blks[1].Root()), errFinalizedChildNotFound)
// now make it work by writing the missing block
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[1]}, blks[2].Root()))
// since blks[1] is now in the index, we should be able to update blks[0]
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[0]}, blks[1].Root()))
}
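The invariant this (removed) test exercises is that BackfillFinalizedIndex only accepts a batch forming an unbroken parent chain whose last element is the parent of an already-indexed finalized block; otherwise it fails with errIncorrectBlockParent or errFinalizedChildNotFound. The intra-batch linkage check amounts to the following sketch (not the actual db-transaction code):
func checkLinkage(blks []consensusblocks.ROBlock) error {
	for i := 1; i < len(blks); i++ {
		if blks[i].Block().ParentRoot() != blks[i-1].Root() {
			return fmt.Errorf("batch element %d does not descend from element %d", i, i-1)
		}
	}
	return nil
}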
func TestStore_BackfillFinalizedIndex(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
@@ -296,23 +252,23 @@ func TestStore_BackfillFinalizedIndex(t *testing.T) {
ParentRoot: ebpr[:],
ChildRoot: chldr[:],
}
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)
// reslice to remove the existing blocks
blks = blks[0:64]
// check the other error conditions with a descendent root that really doesn't exist
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
require.NoError(t, err)
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, disjoint, [32]byte{}), errIncorrectBlockParent)
require.NoError(t, err)
require.ErrorIs(t, errFinalizedChildNotFound, db.BackfillFinalizedIndex(ctx, blks, [32]byte{}))
// use the real root so that it succeeds

View File

@@ -14,7 +14,6 @@ var migrations = []migration{
migrateArchivedIndex,
migrateBlockSlotIndex,
migrateStateValidators,
migrateFinalizedParent,
}
// RunMigrations defined in the migrations array.

View File

@@ -1,87 +0,0 @@
package kv
import (
"bytes"
"context"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
)
var migrationFinalizedParent = []byte("parent_bug_32fb183")
func migrateFinalizedParent(ctx context.Context, db *bolt.DB) error {
if updateErr := db.Update(func(tx *bolt.Tx) error {
mb := tx.Bucket(migrationsBucket)
if b := mb.Get(migrationFinalizedParent); bytes.Equal(b, migrationCompleted) {
return nil // Migration already completed.
}
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
if bkt == nil {
return fmt.Errorf("unable to read %s bucket for migration", finalizedBlockRootsIndexBucket)
}
bb := tx.Bucket(blocksBucket)
if bb == nil {
return fmt.Errorf("unable to read %s bucket for migration", blocksBucket)
}
c := bkt.Cursor()
var slotsWithoutBug primitives.Slot
maxBugSearch := params.BeaconConfig().SlotsPerEpoch * 10
for k, v := c.Last(); k != nil; k, v = c.Prev() {
// check if context is cancelled in between
if ctx.Err() != nil {
return ctx.Err()
}
idxEntry := &ethpb.FinalizedBlockRootContainer{}
if err := decode(ctx, v, idxEntry); err != nil {
return errors.Wrapf(err, "unable to decode finalized block root container for root=%#x", k)
}
// Not one of the corrupt values
if !bytes.Equal(idxEntry.ParentRoot, k) {
slotsWithoutBug += 1
if slotsWithoutBug > maxBugSearch {
break
}
continue
}
slotsWithoutBug = 0
log.WithField("root", fmt.Sprintf("%#x", k)).Debug("found index entry with incorrect parent root")
// Look up full block to get the correct parent root.
encBlk := bb.Get(k)
if encBlk == nil {
return errors.Wrapf(ErrNotFound, "could not find block for corrupt finalized index entry %#x", k)
}
blk, err := unmarshalBlock(ctx, encBlk)
if err != nil {
return errors.Wrapf(err, "unable to decode block for root=%#x", k)
}
// Replace parent root in the index with the correct value and write it back.
pr := blk.Block().ParentRoot()
idxEntry.ParentRoot = pr[:]
idxEnc, err := encode(ctx, idxEntry)
if err != nil {
return errors.Wrapf(err, "failed to encode finalized index entry for root=%#x", k)
}
if err := bkt.Put(k, idxEnc); err != nil {
return errors.Wrapf(err, "failed to update finalized index entry for root=%#x", k)
}
log.WithField("root", fmt.Sprintf("%#x", k)).
WithField("parentRoot", fmt.Sprintf("%#x", idxEntry.ParentRoot)).
Debug("updated corrupt index entry with correct parent")
}
// Mark migration complete.
return mb.Put(migrationFinalizedParent, migrationCompleted)
}); updateErr != nil {
log.WithError(updateErr).Errorf("could not run finalized parent root index repair migration")
return updateErr
}
return nil
}

View File

@@ -70,12 +70,12 @@ func (s *Store) LastEpochWrittenForValidators(
return attestedEpochs, err
}
// SaveLastEpochWrittenForValidators saves the latest epoch
// that each validator has attested to in the provided map.
func (s *Store) SaveLastEpochWrittenForValidators(
// SaveLastEpochsWrittenForValidators updates the latest epoch a slice
// of validator indices has attested to.
func (s *Store) SaveLastEpochsWrittenForValidators(
ctx context.Context, epochByValIndex map[primitives.ValidatorIndex]primitives.Epoch,
) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochWrittenForValidators")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochsWrittenForValidators")
defer span.End()
const batchSize = 10000
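Updates are flushed in batches of 10,000 entries so no single bolt transaction grows unbounded; a generic sketch of that chunking over the input map (the shape is hypothetical, not this function's actual body):
keys := make([]primitives.ValidatorIndex, 0, len(epochByValIndex))
for k := range epochByValIndex {
	keys = append(keys, k)
}
for start := 0; start < len(keys); start += batchSize {
	end := start + batchSize
	if end > len(keys) {
		end = len(keys)
	}
	// Persist keys[start:end] (and their epochs) in one db.Update call.
}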
@@ -157,7 +157,7 @@ func (s *Store) CheckAttesterDoubleVotes(
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
encEpoch := encodeTargetEpoch(attToProcess.IndexedAttestation.Data.Target.Epoch)
localDoubleVotes := make([]*slashertypes.AttesterDoubleVote, 0)
localDoubleVotes := []*slashertypes.AttesterDoubleVote{}
for _, valIdx := range attToProcess.IndexedAttestation.AttestingIndices {
// Check if there is signing root in the database for this combination
@@ -166,7 +166,7 @@ func (s *Store) CheckAttesterDoubleVotes(
validatorEpochKey := append(encEpoch, encIdx...)
attRecordsKey := signingRootsBkt.Get(validatorEpochKey)
// An attestation record key consists of a signing root (32 bytes).
// An attestation record key is comprised of a signing root (32 bytes).
if len(attRecordsKey) < attestationRecordKeySize {
// If there is no signing root for this combination,
// then there is no double vote. We can continue to the next validator.
@@ -697,7 +697,7 @@ func decodeSlasherChunk(enc []byte) ([]uint16, error) {
}
// Encode attestation record to bytes.
// The output encoded attestation record consists in the signing root concatenated with the compressed attestation record.
// The output encoded attestation record consists in the signing root concatened with the compressed attestation record.
func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byte, error) {
if att == nil || att.IndexedAttestation == nil {
return []byte{}, errors.New("nil proposal record")
@@ -716,7 +716,7 @@ func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byt
}
// Decode attestation record from bytes.
// The input encoded attestation record consists in the signing root concatenated with the compressed attestation record.
// The input encoded attestation record consists in the signing root concatened with the compressed attestation record.
func decodeAttestationRecord(encoded []byte) (*slashertypes.IndexedAttestationWrapper, error) {
if len(encoded) < rootSize {
return nil, fmt.Errorf("wrong length for encoded attestation record, want minimum %d, got %d", rootSize, len(encoded))

View File

@@ -89,7 +89,7 @@ func TestStore_LastEpochWrittenForValidators(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 0, len(attestedEpochs))
err = beaconDB.SaveLastEpochWrittenForValidators(ctx, epochsByValidator)
err = beaconDB.SaveLastEpochsWrittenForValidators(ctx, epochsByValidator)
require.NoError(t, err)
retrievedEpochs, err := beaconDB.LastEpochWrittenForValidators(ctx, indices)

View File

@@ -36,6 +36,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -93,7 +94,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",

View File

@@ -20,7 +20,9 @@ import (
coreState "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/types"
statenative "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/container/trie"
contracts "github.com/prysmaticlabs/prysm/v5/contracts/deposit"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -224,14 +226,16 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog *gethtypes.L
"merkleTreeIndex": index,
}).Info("Invalid deposit registered in deposit contract")
}
// We finalize the trie here so that old deposits are not kept around, as they make
// deposit tree htr computation expensive.
dTrie, ok := s.depositTrie.(*depositsnapshot.DepositTree)
if !ok {
return errors.Errorf("wrong trie type initialized: %T", dTrie)
}
if err := dTrie.Finalize(index, depositLog.BlockHash, depositLog.BlockNumber); err != nil {
log.WithError(err).Error("Could not finalize trie")
if features.Get().EnableEIP4881 {
// We finalize the trie here so that old deposits are not kept around, as they make
// deposit tree htr computation expensive.
dTrie, ok := s.depositTrie.(*depositsnapshot.DepositTree)
if !ok {
return errors.Errorf("wrong trie type initialized: %T", dTrie)
}
if err := dTrie.Finalize(index, depositLog.BlockHash, depositLog.BlockNumber); err != nil {
log.WithError(err).Error("Could not finalize trie")
}
}
return nil
@@ -575,17 +579,25 @@ func (s *Service) savePowchainData(ctx context.Context) error {
BeaconState: pbState, // I promise not to mutate it!
DepositContainers: s.cfg.depositCache.AllDepositContainers(ctx),
}
fd, err := s.cfg.depositCache.FinalizedDeposits(ctx)
if err != nil {
return errors.Errorf("could not get finalized deposit tree: %v", err)
}
tree, ok := fd.Deposits().(*depositsnapshot.DepositTree)
if !ok {
return errors.New("deposit tree was not EIP4881 DepositTree")
}
eth1Data.DepositSnapshot, err = tree.ToProto()
if err != nil {
return err
if features.Get().EnableEIP4881 {
fd, err := s.cfg.depositCache.FinalizedDeposits(ctx)
if err != nil {
return errors.Errorf("could not get finalized deposit tree: %v", err)
}
tree, ok := fd.Deposits().(*depositsnapshot.DepositTree)
if !ok {
return errors.New("deposit tree was not EIP4881 DepositTree")
}
eth1Data.DepositSnapshot, err = tree.ToProto()
if err != nil {
return err
}
} else {
tree, ok := s.depositTrie.(*trie.SparseMerkleTrie)
if !ok {
return errors.New("deposit tree was not SparseMerkleTrie")
}
eth1Data.Trie = tree.ToProto()
}
return s.cfg.beaconDB.SaveExecutionChainData(ctx, eth1Data)
}

View File

@@ -9,7 +9,7 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
@@ -31,7 +31,7 @@ func TestProcessDepositLog_OK(t *testing.T) {
require.NoError(t, err, "Unable to set up simulated backend")
beaconDB := testDB.SetupDB(t)
depositCache, err := depositsnapshot.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
server, endpoint, err := mockExecution.SetupRPCServer()
@@ -100,7 +100,7 @@ func TestProcessDepositLog_InsertsPendingDeposit(t *testing.T) {
testAcc, err := mock.Setup()
require.NoError(t, err, "Unable to set up simulated backend")
beaconDB := testDB.SetupDB(t)
depositCache, err := depositsnapshot.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
server, endpoint, err := mockExecution.SetupRPCServer()
require.NoError(t, err)
@@ -216,7 +216,7 @@ func TestProcessETH2GenesisLog_8DuplicatePubkeys(t *testing.T) {
testAcc, err := mock.Setup()
require.NoError(t, err, "Unable to set up simulated backend")
beaconDB := testDB.SetupDB(t)
depositCache, err := depositsnapshot.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
server, endpoint, err := mockExecution.SetupRPCServer()
require.NoError(t, err)
@@ -291,7 +291,7 @@ func TestProcessETH2GenesisLog(t *testing.T) {
testAcc, err := mock.Setup()
require.NoError(t, err, "Unable to set up simulated backend")
beaconDB := testDB.SetupDB(t)
depositCache, err := depositsnapshot.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
server, endpoint, err := mockExecution.SetupRPCServer()
@@ -384,7 +384,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
testAcc, err := mock.Setup()
require.NoError(t, err, "Unable to set up simulated backend")
kvStore := testDB.SetupDB(t)
depositCache, err := depositsnapshot.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
server, endpoint, err := mockExecution.SetupRPCServer()
require.NoError(t, err)
@@ -481,7 +481,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
testAcc, err := mock.Setup()
require.NoError(t, err, "Unable to set up simulated backend")
kvStore := testDB.SetupDB(t)
depositCache, err := depositsnapshot.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
server, endpoint, err := mockExecution.SetupRPCServer()
require.NoError(t, err)
@@ -593,7 +593,7 @@ func TestCheckForChainstart_NoValidator(t *testing.T) {
}
func newPowchainService(t *testing.T, eth1Backend *mock.TestAccount, beaconDB db.Database) *Service {
depositCache, err := depositsnapshot.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
server, endpoint, err := mockExecution.SetupRPCServer()
require.NoError(t, err)

View File

@@ -10,7 +10,7 @@ import (
)
// TestCleanup ensures that the cleanup function unregisters the prometheus.Collection
// also tests the interchangeability of the explicit prometheus Register/Unregister
// also tests the interchangeability of the explicit prometheus Register/Unregister
// also tests the interchangability of the explicit prometheus Register/Unregister
// and the implicit methods within the collector implementation
func TestCleanup(t *testing.T) {
ctx := context.Background()
@@ -32,11 +32,11 @@ func TestCleanup(t *testing.T) {
assert.Equal(t, true, unregistered, "prometheus.Unregister failed to unregister PowchainCollector on final cleanup")
}
// TestCancellation tests that canceling the context passed into
// TestCancelation tests that canceling the context passed into
// NewPowchainCollector cleans everything up as expected. This
// does come at the cost of an extra channel cluttering up
// PowchainCollector, just for this test.
func TestCancellation(t *testing.T) {
func TestCancelation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
pc, err := NewPowchainCollector(ctx)
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")

View File

@@ -29,6 +29,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/container/trie"
contracts "github.com/prysmaticlabs/prysm/v5/contracts/deposit"
@@ -163,7 +164,14 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop()
var depositTrie cache.MerkleTree
var err error
depositTrie = depositsnapshot.NewDepositTree()
if features.Get().EnableEIP4881 {
depositTrie = depositsnapshot.NewDepositTree()
} else {
depositTrie, err = trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
return nil, errors.Wrap(err, "could not set up deposit trie")
}
}
genState, err := transition.EmptyGenesisState()
if err != nil {
return nil, errors.Wrap(err, "could not set up genesis state")
@@ -732,12 +740,20 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
return nil
}
var err error
if eth1DataInDB.DepositSnapshot != nil {
s.depositTrie, err = depositsnapshot.DepositTreeFromSnapshotProto(eth1DataInDB.DepositSnapshot)
} else {
if err = s.migrateOldDepositTree(eth1DataInDB); err != nil {
return err
if features.Get().EnableEIP4881 {
if eth1DataInDB.DepositSnapshot != nil {
s.depositTrie, err = depositsnapshot.DepositTreeFromSnapshotProto(eth1DataInDB.DepositSnapshot)
} else {
if err := s.migrateOldDepositTree(eth1DataInDB); err != nil {
return err
}
}
} else {
if eth1DataInDB.Trie == nil && eth1DataInDB.DepositSnapshot != nil {
return errors.Errorf("trying to use old deposit trie after migration to the new trie. "+
"Remove the --%s flag to resume normal operations.", features.DisableEIP4881.Name)
}
s.depositTrie, err = trie.CreateTrieFromProto(eth1DataInDB.Trie)
}
if err != nil {
return err
@@ -750,19 +766,21 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
}
}
s.latestEth1Data = eth1DataInDB.CurrentEth1Data
ctrs := eth1DataInDB.DepositContainers
// Look at previously finalized index, as we are building off a finalized
// snapshot rather than the full trie.
lastFinalizedIndex := int64(s.depositTrie.NumOfItems() - 1)
// Correctly initialize missing deposits into active trie.
for _, c := range ctrs {
if c.Index > lastFinalizedIndex {
depRoot, err := c.Deposit.Data.HashTreeRoot()
if err != nil {
return err
}
if err := s.depositTrie.Insert(depRoot[:], int(c.Index)); err != nil {
return err
if features.Get().EnableEIP4881 {
ctrs := eth1DataInDB.DepositContainers
// Look at previously finalized index, as we are building off a finalized
// snapshot rather than the full trie.
lastFinalizedIndex := int64(s.depositTrie.NumOfItems() - 1)
// Correctly initialize missing deposits into active trie.
for _, c := range ctrs {
if c.Index > lastFinalizedIndex {
depRoot, err := c.Deposit.Data.HashTreeRoot()
if err != nil {
return err
}
if err := s.depositTrie.Insert(depRoot[:], int(c.Index)); err != nil {
return err
}
}
}
}
@@ -829,13 +847,21 @@ func (s *Service) validPowchainData(ctx context.Context) (*ethpb.ETH1ChainData,
BeaconState: pbState,
DepositContainers: s.cfg.depositCache.AllDepositContainers(ctx),
}
trie, ok := s.depositTrie.(*depositsnapshot.DepositTree)
if !ok {
return nil, errors.New("deposit trie was not EIP4881 DepositTree")
}
eth1Data.DepositSnapshot, err = trie.ToProto()
if err != nil {
return nil, err
if features.Get().EnableEIP4881 {
trie, ok := s.depositTrie.(*depositsnapshot.DepositTree)
if !ok {
return nil, errors.New("deposit trie was not EIP4881 DepositTree")
}
eth1Data.DepositSnapshot, err = trie.ToProto()
if err != nil {
return nil, err
}
} else {
trie, ok := s.depositTrie.(*trie.SparseMerkleTrie)
if !ok {
return nil, errors.New("deposit trie was not SparseMerkleTrie")
}
eth1Data.Trie = trie.ToProto()
}
if err := s.cfg.beaconDB.SaveExecutionChainData(ctx, eth1Data); err != nil {
return nil, err
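
The hunks above gate the deposit trie implementation behind the EnableEIP4881 feature flag: the EIP-4881 snapshot tree when enabled, the legacy sparse Merkle trie otherwise, with type assertions when persisting. A compilable sketch of that selection pattern, using stand-in types rather than Prysm's real cache.MerkleTree:

```go
package main

import "fmt"

// merkleTree is a trimmed stand-in for the shared deposit trie interface.
type merkleTree interface{ NumOfItems() int }

type depositSnapshotTree struct{ n int } // EIP-4881 style snapshot tree

func (t *depositSnapshotTree) NumOfItems() int { return t.n }

type sparseMerkleTrie struct{ n int } // legacy full trie

func (t *sparseMerkleTrie) NumOfItems() int { return t.n }

// newDepositTrie mirrors the flag-gated construction in NewService above.
func newDepositTrie(enableEIP4881 bool) merkleTree {
	if enableEIP4881 {
		return &depositSnapshotTree{}
	}
	return &sparseMerkleTrie{}
}

func main() {
	fmt.Printf("%T\n", newDepositTrie(true))  // *main.depositSnapshotTree
	fmt.Printf("%T\n", newDepositTrie(false)) // *main.sparseMerkleTrie
}
```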


@@ -15,7 +15,7 @@ import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/async/event"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
dbutil "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/types"
@@ -348,7 +348,7 @@ func TestInitDepositCache_OK(t *testing.T) {
cfg: &config{beaconDB: beaconDB},
}
var err error
s.cfg.depositCache, err = depositsnapshot.New()
s.cfg.depositCache, err = depositcache.New()
require.NoError(t, err)
require.NoError(t, s.initDepositCaches(context.Background(), ctrs))
@@ -409,7 +409,7 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
cfg: &config{beaconDB: beaconDB},
}
var err error
s.cfg.depositCache, err = depositsnapshot.New()
s.cfg.depositCache, err = depositcache.New()
require.NoError(t, err)
require.NoError(t, s.initDepositCaches(context.Background(), ctrs))
@@ -553,7 +553,7 @@ func Test_batchRequestHeaders_UnderflowChecks(t *testing.T) {
func TestService_EnsureConsistentPowchainData(t *testing.T) {
beaconDB := dbutil.SetupDB(t)
cache, err := depositsnapshot.New()
cache, err := depositcache.New()
require.NoError(t, err)
srv, endpoint, err := mockExecution.SetupRPCServer()
require.NoError(t, err)
@@ -583,7 +583,7 @@ func TestService_EnsureConsistentPowchainData(t *testing.T) {
func TestService_InitializeCorrectly(t *testing.T) {
beaconDB := dbutil.SetupDB(t)
cache, err := depositsnapshot.New()
cache, err := depositcache.New()
require.NoError(t, err)
srv, endpoint, err := mockExecution.SetupRPCServer()
@@ -614,7 +614,7 @@ func TestService_InitializeCorrectly(t *testing.T) {
func TestService_EnsureValidPowchainData(t *testing.T) {
beaconDB := dbutil.SetupDB(t)
cache, err := depositsnapshot.New()
cache, err := depositcache.New()
require.NoError(t, err)
srv, endpoint, err := mockExecution.SetupRPCServer()
require.NoError(t, err)
@@ -809,7 +809,7 @@ func (s *slowRPCClient) CallContext(_ context.Context, _ interface{}, _ string,
func TestService_migrateOldDepositTree(t *testing.T) {
beaconDB := dbutil.SetupDB(t)
cache, err := depositsnapshot.New()
cache, err := depositcache.New()
require.NoError(t, err)
srv, endpoint, err := mockExecution.SetupRPCServer()


@@ -21,6 +21,7 @@ go_library(
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
@@ -54,6 +55,8 @@ go_library(
"//beacon-chain/verification:go_default_library",
"//cmd:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//cmd/beacon-chain/storage/flags:go_default_library",
"//cmd/beacon-chain/sync/backfill/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -93,6 +96,8 @@ go_test(
"//beacon-chain/monitor:go_default_library",
"//cmd:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//cmd/beacon-chain/storage/flags:go_default_library",
"//cmd/beacon-chain/sync/backfill/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",


@@ -2,17 +2,26 @@ package node
import (
"fmt"
"math"
"github.com/ethereum/go-ethereum/common"
fastssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/cmd"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
storageFlags "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/storage/flags"
backfill "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/sync/backfill/flags"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
tracing2 "github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/urfave/cli/v2"
)
const (
maxBlobRetentionEpoch = math.MaxUint32
archivalSlotsPerArchivedPoint = 32
)
func configureTracing(cliCtx *cli.Context) error {
return tracing2.Setup(
"beacon-chain", // service name
@@ -198,6 +207,37 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
return params.SetActive(c)
}
func configureArchivalNode(cliCtx *cli.Context) error {
if cliCtx.IsSet(flags.ArchivalNodeFlag.Name) {
log.Info("Enabling Archival mode on the beacon node")
if cliCtx.IsSet(flags.SlotsPerArchivedPoint.Name) {
log.Infof("Changing slots per archived point from %d to %d", cliCtx.Int(flags.SlotsPerArchivedPoint.Name), archivalSlotsPerArchivedPoint)
}
if err := cliCtx.Set(flags.SlotsPerArchivedPoint.Name, fmt.Sprintf("%d", archivalSlotsPerArchivedPoint)); err != nil {
return err
}
if !cliCtx.IsSet(features.SaveFullExecutionPayloads.Name) {
log.Info("Saving full execution payloads")
if err := cliCtx.Set(features.SaveFullExecutionPayloads.Name, "true"); err != nil {
return err
}
}
if !cliCtx.IsSet(backfill.EnableExperimentalBackfill.Name) {
log.Info("Enabling backfill on nodes")
if err := cliCtx.Set(backfill.EnableExperimentalBackfill.Name, "true"); err != nil {
return err
}
}
if cliCtx.IsSet(storageFlags.BlobRetentionEpochFlag.Name) {
log.Infof("Changing blob retention epochs from %d to %d", cliCtx.Uint64(storageFlags.BlobRetentionEpochFlag.Name), maxBlobRetentionEpoch)
}
if err := cliCtx.Set(storageFlags.BlobRetentionEpochFlag.Name, fmt.Sprintf("%d", maxBlobRetentionEpoch)); err != nil {
return err
}
}
return nil
}
func configureFastSSZHashingAlgorithm() {
fastssz.EnableVectorizedHTR = true
}
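
configureArchivalNode illustrates a general urfave/cli pattern: one umbrella flag rewrites its dependent flags through cliCtx.Set, logging whenever it overrides a user-supplied value. A runnable sketch with illustrative flag names (not Prysm's real ones):

```go
package main

import (
	"fmt"
	"log"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.BoolFlag{Name: "archival"},
			&cli.IntFlag{Name: "slots-per-archived-point", Value: 2048},
		},
		Action: func(ctx *cli.Context) error {
			if ctx.Bool("archival") {
				if ctx.IsSet("slots-per-archived-point") {
					fmt.Println("overriding user-supplied slots-per-archived-point")
				}
				// Force the archival-friendly value, as the code above forces
				// archivalSlotsPerArchivedPoint and maxBlobRetentionEpoch.
				if err := ctx.Set("slots-per-archived-point", "32"); err != nil {
					return err
				}
			}
			fmt.Println("slots per archived point:", ctx.Int("slots-per-archived-point"))
			return nil
		},
	}
	if err := app.Run([]string{"demo", "--archival"}); err != nil {
		log.Fatal(err)
	}
}
```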


@@ -11,6 +11,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/v5/cmd"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
storageFlags "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/storage/flags"
backfill "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/sync/backfill/flags"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
@@ -228,3 +231,38 @@ func TestConfigureInterop(t *testing.T) {
})
}
}
func TestConfigureArchivalNode(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
app := cli.App{}
set := flag.NewFlagSet("test", 0)
set.Bool(flags.ArchivalNodeFlag.Name, false, "")
set.Int(flags.SlotsPerArchivedPoint.Name, 2048, "")
set.Bool(features.SaveFullExecutionPayloads.Name, false, "")
set.Bool(backfill.EnableExperimentalBackfill.Name, false, "")
set.Uint64(storageFlags.BlobRetentionEpochFlag.Name, 4096, "")
require.NoError(t, set.Set(flags.ArchivalNodeFlag.Name, "true"))
cliCtx := cli.NewContext(&app, set, nil)
require.NoError(t, configureArchivalNode(cliCtx))
assert.LogsContain(t, hook, "Enabling Archival mode on the beacon node")
assert.LogsContain(t, hook, "Saving full execution payloads")
assert.LogsContain(t, hook, "Enabling backfill on nodes")
hook.Reset()
require.NoError(t, set.Set(flags.SlotsPerArchivedPoint.Name, "256"))
require.NoError(t, set.Set(features.SaveFullExecutionPayloads.Name, "true"))
require.NoError(t, set.Set(backfill.EnableExperimentalBackfill.Name, "true"))
require.NoError(t, set.Set(storageFlags.BlobRetentionEpochFlag.Name, "2048"))
cliCtx = cli.NewContext(&app, set, nil)
require.NoError(t, configureArchivalNode(cliCtx))
assert.LogsContain(t, hook, "Enabling Archival mode on the beacon node")
assert.LogsContain(t, hook, "Changing slots per archived point from 256 to 32")
assert.LogsContain(t, hook, "Changing blob retention epochs from 2048 to 4294967295")
}


@@ -7,11 +7,9 @@ import (
"bytes"
"context"
"fmt"
"net"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
@@ -25,6 +23,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
@@ -128,16 +127,54 @@ type BeaconNode struct {
// New creates a new node instance, sets up configuration options, and registers
// every required service to the node.
func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*BeaconNode, error) {
if err := configureBeacon(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not set beacon configuration options")
if err := configureTracing(cliCtx); err != nil {
return nil, err
}
prereqs.WarnIfPlatformNotSupported(cliCtx.Context)
if hasNetworkFlag(cliCtx) && cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return nil, fmt.Errorf("%s cannot be passed concurrently with network flag", cmd.ChainConfigFileFlag.Name)
}
if err := configureArchivalNode(cliCtx); err != nil {
return nil, err
}
if err := features.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
return nil, err
}
flags.ConfigureGlobalFlags(cliCtx)
if err := configureChainConfig(cliCtx); err != nil {
return nil, err
}
if err := configureHistoricalSlasher(cliCtx); err != nil {
return nil, err
}
err := configureBuilderCircuitBreaker(cliCtx)
if err != nil {
return nil, err
}
if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
return nil, err
}
if err := configureEth1Config(cliCtx); err != nil {
return nil, err
}
configureNetwork(cliCtx)
if err := configureInteropConfig(cliCtx); err != nil {
return nil, err
}
if err := configureExecutionSetting(cliCtx); err != nil {
return nil, err
}
configureFastSSZHashingAlgorithm()
// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()
registry := runtime.NewServiceRegistry()
ctx := cliCtx.Context
ctx := cliCtx.Context
beacon := &BeaconNode{
cliCtx: cliCtx,
ctx: ctx,
@@ -157,10 +194,10 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
slasherBlockHeadersFeed: new(event.Feed),
slasherAttestationsFeed: new(event.Feed),
serviceFlagOpts: &serviceFlagOpts{},
initialSyncComplete: make(chan struct{}),
syncChecker: &initialsync.SyncChecker{},
}
beacon.initialSyncComplete = make(chan struct{})
beacon.syncChecker = &initialsync.SyncChecker{}
for _, opt := range opts {
if err := opt(beacon); err != nil {
return nil, err
@@ -169,8 +206,8 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
synchronizer := startup.NewClockSynchronizer()
beacon.clockWaiter = synchronizer
beacon.forkChoicer = doublylinkedtree.New()
beacon.forkChoicer = doublylinkedtree.New()
depositAddress, err := execution.DepositContractAddress()
if err != nil {
return nil, err
@@ -186,29 +223,112 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.BlobStorage = blobs
}
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, err
}
beacon.BlobStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, err
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
return nil, errors.Wrap(err, "backfill status initialization error")
}
log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
if errors.Is(err, stategen.ErrNoGenesisBlock) {
log.Errorf("No genesis block/state is found. Prysm only provides a mainnet genesis "+
"state bundled in the application. You must provide the --%s or --%s flag to load "+
"a genesis block/state for this network.", "genesis-state", "genesis-beacon-api-url")
}
return nil, err
}
beacon.verifyInitWaiter = verification.NewInitializerWaiter(
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen)
pa := peers.NewAssigner(beacon.fetchP2P().Peers(), beacon.forkChoicer)
beacon.BackfillOpts = append(
beacon.BackfillOpts,
backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)),
)
beacon.BackfillOpts = append(beacon.BackfillOpts, backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)))
bf, err := backfill.NewService(ctx, bfs, beacon.BlobStorage, beacon.clockWaiter, beacon.fetchP2P(), pa, beacon.BackfillOpts...)
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := beacon.services.RegisterService(bf); err != nil {
return nil, errors.Wrap(err, "error registering backfill service")
}
if err := registerServices(cliCtx, beacon, synchronizer, bf, bfs); err != nil {
return nil, errors.Wrap(err, "could not register services")
log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return nil, err
}
log.Debugln("Registering Attestation Pool Service")
if err := beacon.registerAttestationPool(); err != nil {
return nil, err
}
log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
return nil, err
}
log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
return nil, err
}
log.Debugln("Registering Initial Sync Service")
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
return nil, err
}
log.Debugln("Registering Sync Service")
if err := beacon.registerSyncService(beacon.initialSyncComplete, bfs); err != nil {
return nil, err
}
log.Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return nil, err
}
log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(cliCtx); err != nil {
return nil, err
}
log.Debugln("Registering RPC Service")
router := newRouter(cliCtx)
if err := beacon.registerRPCService(router); err != nil {
return nil, err
}
log.Debugln("Registering GRPC Gateway Service")
if err := beacon.registerGRPCGateway(router); err != nil {
return nil, err
}
log.Debugln("Registering Validator Monitoring Service")
if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil {
return nil, err
}
if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
log.Debugln("Registering Prometheus Service")
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return nil, err
}
}
// db.DatabasePath is the path to the containing directory
@@ -226,170 +346,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
return beacon, nil
}
func configureBeacon(cliCtx *cli.Context) error {
if err := configureTracing(cliCtx); err != nil {
return errors.Wrap(err, "could not configure tracing")
}
prereqs.WarnIfPlatformNotSupported(cliCtx.Context)
if hasNetworkFlag(cliCtx) && cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
return fmt.Errorf("%s cannot be passed concurrently with network flag", cmd.ChainConfigFileFlag.Name)
}
if err := features.ConfigureBeaconChain(cliCtx); err != nil {
return errors.Wrap(err, "could not configure beacon chain")
}
if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
return errors.Wrap(err, "could not configure beacon chain")
}
flags.ConfigureGlobalFlags(cliCtx)
if err := configureChainConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure chain config")
}
if err := configureHistoricalSlasher(cliCtx); err != nil {
return errors.Wrap(err, "could not configure historical slasher")
}
if err := configureBuilderCircuitBreaker(cliCtx); err != nil {
return errors.Wrap(err, "could not configure builder circuit breaker")
}
if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
return errors.Wrap(err, "could not configure slots per archived point")
}
if err := configureEth1Config(cliCtx); err != nil {
return errors.Wrap(err, "could not configure eth1 config")
}
configureNetwork(cliCtx)
if err := configureInteropConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure interop config")
}
if err := configureExecutionSetting(cliCtx); err != nil {
return errors.Wrap(err, "could not configure execution setting")
}
configureFastSSZHashingAlgorithm()
return nil
}
func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string) (*backfill.Store, error) {
ctx := cliCtx.Context
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, errors.Wrap(err, "could not start DB")
}
beacon.BlobStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not start slashing DB")
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not register P2P service")
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "could not create backfill updater")
}
log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
if errors.Is(err, stategen.ErrNoGenesisBlock) {
log.Errorf("No genesis block/state is found. Prysm only provides a mainnet genesis "+
"state bundled in the application. You must provide the --%s or --%s flag to load "+
"a genesis block/state for this network.", "genesis-state", "genesis-beacon-api-url")
}
return nil, errors.Wrap(err, "could not start state generation")
}
return bfs, nil
}
func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *startup.ClockSynchronizer, bf *backfill.Service, bfs *backfill.Store) error {
if err := beacon.services.RegisterService(bf); err != nil {
return errors.Wrap(err, "could not register backfill service")
}
log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return errors.Wrap(err, "could not register POW chain service")
}
log.Debugln("Registering Attestation Pool Service")
if err := beacon.registerAttestationPool(); err != nil {
return errors.Wrap(err, "could not register attestation pool service")
}
log.Debugln("Registering Deterministic Genesis Service")
if err := beacon.registerDeterministicGenesisService(); err != nil {
return errors.Wrap(err, "could not register deterministic genesis service")
}
log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register blockchain service")
}
log.Debugln("Registering Initial Sync Service")
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register initial sync service")
}
log.Debugln("Registering Sync Service")
if err := beacon.registerSyncService(beacon.initialSyncComplete, bfs); err != nil {
return errors.Wrap(err, "could not register sync service")
}
log.Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return errors.Wrap(err, "could not register slasher service")
}
log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(cliCtx); err != nil {
return errors.Wrap(err, "could not register builder service")
}
log.Debugln("Registering RPC Service")
router := newRouter(cliCtx)
if err := beacon.registerRPCService(router); err != nil {
return errors.Wrap(err, "could not register RPC service")
}
log.Debugln("Registering GRPC Gateway Service")
if err := beacon.registerGRPCGateway(router); err != nil {
return errors.Wrap(err, "could not register GRPC gateway service")
}
log.Debugln("Registering Validator Monitoring Service")
if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil {
return errors.Wrap(err, "could not register validator monitoring service")
}
if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
log.Debugln("Registering Prometheus Service")
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return errors.Wrap(err, "could not register prometheus service")
}
}
return nil
}
func initSyncWaiter(ctx context.Context, complete chan struct{}) func() error {
return func() error {
select {
@@ -478,86 +434,40 @@ func (b *BeaconNode) Close() {
close(b.stop)
}
func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath string) (*kv.Store, error) {
var err error
clearDBConfirmed := false
if clearDB && !forceClearDB {
const (
actionText = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText = "Database will not be deleted. No changes have been made."
)
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return nil, errors.Wrapf(err, "could not confirm action")
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return nil, errors.Wrap(err, "could not create new database")
}
}
return d, nil
}
func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
return errors.Wrap(err, "could not get deposit contract address")
}
addr := common.HexToAddress(depositAddress)
if len(knownContract) == 0 {
if err := b.db.SaveDepositContractAddress(b.ctx, addr); err != nil {
return errors.Wrap(err, "could not save deposit contract")
}
}
if len(knownContract) > 0 && !bytes.Equal(addr.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"--%s to wipe the old database or use an alternative data directory with --%s",
knownContract, addr.Bytes(), cmd.ClearDB.Name, cmd.DataDirFlag.Name)
}
return nil
}
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
var depositCache cache.DepositCache
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDBRequired := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDBRequired := cliCtx.Bool(cmd.ForceClearDB.Name)
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrapf(err, "could not create database at %s", dbPath)
return err
}
if clearDBRequired || forceClearDBRequired {
d, err = b.clearDB(clearDBRequired, forceClearDBRequired, d, dbPath)
clearDBConfirmed := false
if clearDB && !forceClearDB {
actionText := "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText := "Database will not be deleted. No changes have been made."
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return err
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return errors.Wrap(err, "could not clear blob storage")
}
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
}
}
if err := d.RunMigrations(b.ctx); err != nil {
@@ -566,7 +476,12 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
b.db = d
depositCache, err = depositsnapshot.New()
var depositCache cache.DepositCache
if features.Get().EnableEIP4881 {
depositCache, err = depositsnapshot.New()
} else {
depositCache, err = depositcache.New()
}
if err != nil {
return errors.Wrap(err, "could not create deposit cache")
}
@@ -576,17 +491,16 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
if b.GenesisInitializer != nil {
if err := b.GenesisInitializer.Initialize(b.ctx, d); err != nil {
if err == db.ErrExistingGenesisState {
return errors.Errorf("Genesis state flag specified but a genesis state "+
"exists already. Run again with --%s and/or ensure you are using the "+
"appropriate testnet flag to load the given genesis state.", cmd.ClearDB.Name)
return errors.New("Genesis state flag specified but a genesis state " +
"exists already. Run again with --clear-db and/or ensure you are using the " +
"appropriate testnet flag to load the given genesis state.")
}
return errors.Wrap(err, "could not load genesis from file")
}
}
if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
return errors.Wrap(err, "could not ensure embedded genesis")
return err
}
if b.CheckpointInitializer != nil {
@@ -595,11 +509,23 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
}
}
if err := b.checkAndSaveDepositContract(depositAddress); err != nil {
return errors.Wrap(err, "could not check and save deposit contract")
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
return err
}
log.WithField("address", depositAddress).Info("Deposit contract")
addr := common.HexToAddress(depositAddress)
if len(knownContract) == 0 {
if err := b.db.SaveDepositContractAddress(b.ctx, addr); err != nil {
return errors.Wrap(err, "could not save deposit contract")
}
}
if len(knownContract) > 0 && !bytes.Equal(addr.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"'--clear-db' to wipe the old database or use an alternative data directory with '--datadir'",
knownContract, addr.Bytes())
}
log.Infof("Deposit contract: %#x", addr.Bytes())
return nil
}
@@ -638,7 +564,9 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return errors.Wrap(err, "could not clear blob storage")
}
d, err = slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
@@ -685,32 +613,31 @@ func (b *BeaconNode) startStateGen(ctx context.Context, bfs coverage.AvailableBl
func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
bootstrapNodeAddrs, dataDir, err := registration.P2PPreregistration(cliCtx)
if err != nil {
return errors.Wrapf(err, "could not register p2p service")
return err
}
svc, err := p2p.NewService(b.ctx, &p2p.Config{
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
QUICPort: cliCtx.Uint(cmd.P2PQUICPort.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
BootstrapNodeAddr: bootstrapNodeAddrs,
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
})
if err != nil {
return err
@@ -1052,13 +979,11 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
if b.cliCtx.Bool(flags.DisableGRPCGateway.Name) {
return nil
}
gatewayHost := b.cliCtx.String(flags.GRPCGatewayHost.Name)
gatewayPort := b.cliCtx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := b.cliCtx.String(flags.GRPCGatewayHost.Name)
rpcHost := b.cliCtx.String(flags.RPCHost.Name)
rpcPort := b.cliCtx.Int(flags.RPCPort.Name)
selfAddress := net.JoinHostPort(rpcHost, strconv.Itoa(rpcPort))
gatewayAddress := net.JoinHostPort(gatewayHost, strconv.Itoa(gatewayPort))
selfAddress := fmt.Sprintf("%s:%d", rpcHost, b.cliCtx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
allowedOrigins := strings.Split(b.cliCtx.String(flags.GPRCGatewayCorsDomain.Name), ",")
enableDebugRPCEndpoints := b.cliCtx.Bool(flags.EnableDebugRPCEndpoints.Name)
selfCert := b.cliCtx.String(flags.CertFlag.Name)
@@ -1152,9 +1077,9 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
return err
}
opts := b.serviceFlagOpts.builderOpts
opts = append(opts, builder.WithHeadFetcher(chainService), builder.WithDatabase(b.db))
opts := append(b.serviceFlagOpts.builderOpts,
builder.WithHeadFetcher(chainService),
builder.WithDatabase(b.db))
// make cache the default.
if !cliCtx.Bool(features.DisableRegistrationCache.Name) {
opts = append(opts, builder.WithRegistrationCache())
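
One side of this node.go diff wraps every registration step with errors.Wrap so a failure names its stage; the other returns the raw error after a Debugln. A small sketch of the wrapped, sequenced-registration style (service names illustrative):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func registerAll() error {
	steps := []struct {
		name string
		fn   func() error
	}{
		{"p2p", func() error { return nil }},
		{"blockchain", func() error { return errors.New("boom") }},
		{"rpc", func() error { return nil }},
	}
	for _, s := range steps {
		// Wrapping here means the final message identifies the failing stage.
		if err := s.fn(); err != nil {
			return errors.Wrapf(err, "could not register %s service", s.name)
		}
	}
	return nil
}

func main() {
	fmt.Println(registerAll()) // could not register blockchain service: boom
}
```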


@@ -217,9 +217,9 @@ func Test_hasNetworkFlag(t *testing.T) {
want bool
}{
{
name: "Holesky testnet",
networkName: features.HoleskyTestnet.Name,
networkValue: "holesky",
name: "Prater testnet",
networkName: features.PraterTestnet.Name,
networkValue: "prater",
want: true,
},
{


@@ -11,7 +11,6 @@ go_library(
deps = [
"//cmd:go_default_library",
"//config/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@in_gopkg_yaml_v2//:go_default_library",


@@ -4,7 +4,6 @@ import (
"os"
"path/filepath"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/cmd"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/urfave/cli/v2"
@@ -32,9 +31,9 @@ func P2PPreregistration(cliCtx *cli.Context) (bootstrapNodeAddrs []string, dataD
if dataDir == "" {
dataDir = cmd.DefaultDataDir()
if dataDir == "" {
err = errors.Errorf(
"Could not determine your system's HOME path, please specify a --%s you wish to use for your chain data",
cmd.DataDirFlag.Name,
log.Fatal(
"Could not determine your system's HOME path, please specify a --datadir you wish " +
"to use for your chain data",
)
}
}
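
The change above swaps a log.Fatal for a returned error: log.Fatal exits the process immediately, skipping deferred cleanup and giving the caller no chance to react. A sketch of the returned-error form (hypothetical helper, not the real P2PPreregistration):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// defaultDataDir returns an error instead of fatally logging when the
// HOME path cannot be determined, leaving the decision to the caller.
func defaultDataDir(home string) (string, error) {
	if home == "" {
		return "", errors.Errorf(
			"could not determine your system's HOME path, please specify a --%s to use for your chain data",
			"datadir",
		)
	}
	return home + "/.eth2", nil
}

func main() {
	if _, err := defaultDataDir(""); err != nil {
		fmt.Println(err)
	}
}
```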


@@ -49,7 +49,6 @@ go_test(
"//beacon-chain/operations/attestations/kv:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",


@@ -6,7 +6,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// pruneAttsPool prunes attestations pool on every slot interval.
@@ -67,18 +66,7 @@ func (s *Service) pruneExpiredAtts() {
// Return true if the input slot has expired.
// Expired is defined as more than one epoch behind the current time.
func (s *Service) expired(providedSlot primitives.Slot) bool {
providedEpoch := slots.ToEpoch(providedSlot)
currSlot := slots.CurrentSlot(s.genesisTime)
currEpoch := slots.ToEpoch(currSlot)
if currEpoch < params.BeaconConfig().DenebForkEpoch {
return s.expiredPreDeneb(providedSlot)
}
return providedEpoch+1 < currEpoch
}
// Handles expiration of attestations before Deneb.
func (s *Service) expiredPreDeneb(slot primitives.Slot) bool {
func (s *Service) expired(slot primitives.Slot) bool {
expirationSlot := slot + params.BeaconConfig().SlotsPerEpoch
expirationTime := s.genesisTime + uint64(expirationSlot.Mul(params.BeaconConfig().SecondsPerSlot))
currentTime := uint64(prysmTime.Now().Unix())
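
The two expiry rules in this hunk differ at the epoch boundary: the Deneb rule keeps an attestation until its epoch is more than one epoch old, while the legacy rule cuts off one epoch's worth of slots after the attestation slot. A pure-arithmetic sketch (the real pre-Deneb check compares wall-clock time; slots are used here for brevity):

```go
package main

import "fmt"

const slotsPerEpoch = 32

// expiredDeneb: expired once the attestation's epoch is >1 epoch behind.
func expiredDeneb(attSlot, currSlot uint64) bool {
	return attSlot/slotsPerEpoch+1 < currSlot/slotsPerEpoch
}

// expiredPreDeneb: expired one epoch's worth of slots after the slot.
func expiredPreDeneb(attSlot, currSlot uint64) bool {
	return attSlot+slotsPerEpoch < currSlot
}

func main() {
	fmt.Println(expiredDeneb(64, 129), expiredPreDeneb(64, 129)) // true true
	// Boundary case: the Deneb rule retains attestations from the previous epoch.
	fmt.Println(expiredDeneb(96, 129), expiredPreDeneb(96, 129)) // false true
}
```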


@@ -9,7 +9,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/async"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -128,22 +127,3 @@ func TestPruneExpired_Expired(t *testing.T) {
assert.Equal(t, true, s.expired(0), "Should be expired")
assert.Equal(t, false, s.expired(1), "Should not be expired")
}
func TestPruneExpired_ExpiredDeneb(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.DenebForkEpoch = 3
params.OverrideBeaconConfig(cfg)
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
require.NoError(t, err)
// Rewind back 4 epochs + 10 slots worth of time.
s.genesisTime = uint64(prysmTime.Now().Unix()) - (4*uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) + 10)
secondEpochStart := primitives.Slot(2 * uint64(params.BeaconConfig().SlotsPerEpoch))
thirdEpochStart := primitives.Slot(3 * uint64(params.BeaconConfig().SlotsPerEpoch))
assert.Equal(t, true, s.expired(secondEpochStart), "Should be expired")
assert.Equal(t, false, s.expired(thirdEpochStart), "Should not be expired")
}


@@ -90,12 +90,10 @@ go_library(
"@com_github_libp2p_go_libp2p//core/peerstore:go_default_library",
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
"@com_github_libp2p_go_libp2p_mplex//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_libp2p_go_mplex//:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",


@@ -10,13 +10,11 @@ import (
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)
@@ -70,7 +68,7 @@ func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *
}
// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.internalBroadcastAttestation(ctx, subnet, att, forkDigest)
go s.broadcastAttestation(ctx, subnet, att, forkDigest)
return nil
}
@@ -96,8 +94,8 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
return nil
}
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastAttestation")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -138,11 +136,8 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
// In the event our attestation is outdated and beyond the
// acceptable threshold, we exit early and do not broadcast it.
currSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
if err := helpers.ValidateAttestationTime(att.Data.Slot, s.genesisTime, params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
log.WithFields(logrus.Fields{
"attestationSlot": att.Data.Slot,
"currentSlot": currSlot,
}).WithError(err).Warning("Attestation is too old to broadcast, discarding it")
if att.Data.Slot+params.BeaconConfig().SlotsPerEpoch < currSlot {
log.Warnf("Attestation is too old to broadcast, discarding it. Current Slot: %d , Attestation Slot: %d", currSlot, att.Data.Slot)
return
}
@@ -223,13 +218,13 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
}
// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.internalBroadcastBlob(ctx, subnet, blob, forkDigest)
go s.broadcastBlob(ctx, subnet, blob, forkDigest)
return nil
}
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
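
One side of this hunk replaces a formatted Warnf with structured fields plus WithError, which keeps the slot values machine-parseable. A minimal logrus sketch of that style:

```go
package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	err := errors.New("attestation slot is outside the propagation range")
	// Fields carry the values; WithError attaches the cause separately.
	logger.WithFields(logrus.Fields{
		"attestationSlot": 10,
		"currentSlot":     55,
	}).WithError(err).Warning("Attestation is too old to broadcast, discarding it")
}
```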


@@ -240,8 +240,9 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
var hosts []host.Host
// setup other nodes.
cfg = &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
MaxPeers: 30,
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
MaxPeers: 30,
}
// Setup 2 different hosts
for i := 1; i <= 2; i++ {


@@ -12,28 +12,28 @@ const defaultPubsubQueueSize = 600
// Config for the p2p service. These parameters are set from application level flags
// to initialize the p2p service.
type Config struct {
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool
StaticPeers []string
Discv5BootStrapAddrs []string
RelayNodeAddr string
LocalIP string
HostAddress string
HostDNS string
PrivateKey string
DataDir string
MetaDataDir string
QUICPort uint
TCPPort uint
UDPPort uint
MaxPeers uint
QueueSize uint
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabase
ClockWaiter startup.ClockWaiter
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool
StaticPeers []string
BootstrapNodeAddr []string
Discv5BootStrapAddr []string
RelayNodeAddr string
LocalIP string
HostAddress string
HostDNS string
PrivateKey string
DataDir string
MetaDataDir string
TCPPort uint
UDPPort uint
MaxPeers uint
QueueSize uint
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabase
ClockWaiter startup.ClockWaiter
}
// validateConfig validates whether the values provided are accurate and will set


@@ -25,7 +25,7 @@ const (
)
// InterceptPeerDial tests whether we're permitted to Dial the specified peer.
func (*Service) InterceptPeerDial(_ peer.ID) (allow bool) {
func (_ *Service) InterceptPeerDial(_ peer.ID) (allow bool) {
return true
}
@@ -63,12 +63,12 @@ func (s *Service) InterceptAccept(n network.ConnMultiaddrs) (allow bool) {
// InterceptSecured tests whether a given connection, now authenticated,
// is allowed.
func (*Service) InterceptSecured(_ network.Direction, _ peer.ID, _ network.ConnMultiaddrs) (allow bool) {
func (_ *Service) InterceptSecured(_ network.Direction, _ peer.ID, _ network.ConnMultiaddrs) (allow bool) {
return true
}
// InterceptUpgraded tests whether a fully capable connection is allowed.
func (*Service) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
func (_ *Service) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
return true, 0
}
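
The receiver change above is purely stylistic: when a method never uses its receiver, Go allows dropping the receiver name entirely, and `(*Service)` reads more idiomatically than `(_ *Service)`. A tiny sketch:

```go
package main

import "fmt"

type service struct{}

// The receiver name is omitted because the method never touches it.
func (*service) InterceptPeerDial() bool { return true }

func main() {
	var s service
	fmt.Println(s.InterceptPeerDial()) // true
}
```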


@@ -4,7 +4,6 @@ import (
"bytes"
"crypto/ecdsa"
"net"
"sync"
"time"
"github.com/ethereum/go-ethereum/p2p/discover"
@@ -16,11 +15,8 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v5/math"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -38,16 +34,6 @@ type Listener interface {
LocalNode() *enode.LocalNode
}
const (
udp4 = iota
udp6
)
type quicProtocol uint16
// quicProtocol is the "quic" key, which holds the QUIC port of the node.
func (quicProtocol) ENRKey() string { return "quic" }
// RefreshENR uses an epoch to refresh the enr entry for our node
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
@@ -76,14 +62,8 @@ func (s *Service) RefreshENR() {
// Compare current epoch with our fork epochs
altairForkEpoch := params.BeaconConfig().AltairForkEpoch
switch {
case currEpoch < altairForkEpoch:
// Phase 0 behaviour.
if bytes.Equal(bitV, currentBitV) {
// return early if bitfield hasn't changed
return
}
s.updateSubnetRecordWithMetadata(bitV)
default:
// Altair Behaviour
case currEpoch >= altairForkEpoch:
// Retrieve sync subnets from application level
// cache.
bitS := bitfield.Bitvector4{byte(0x00)}
@@ -102,6 +82,13 @@ func (s *Service) RefreshENR() {
return
}
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
default:
// Phase 0 behaviour.
if bytes.Equal(bitV, currentBitV) {
// return early if bitfield hasn't changed
return
}
s.updateSubnetRecordWithMetadata(bitV)
}
// ping all peers to inform them of new metadata
s.pingPeers()
@@ -109,15 +96,14 @@ func (s *Service) RefreshENR() {
// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
func (s *Service) listenForNewNodes() {
iterator := filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
iterator := s.dv5Listener.RandomNodes()
iterator = enode.Filter(iterator, s.filterPeer)
defer iterator.Close()
for {
// Exit if service's context is canceled.
// Exit if service's context is canceled
if s.ctx.Err() != nil {
break
}
if s.isPeerAtLimit(false /* inbound */) {
// Pause the main loop for a period to stop looking
// for new peers.
@@ -125,47 +111,23 @@ func (s *Service) listenForNewNodes() {
time.Sleep(pollingPeriod)
continue
}
wantedCount := s.wantedPeerDials()
if wantedCount == 0 {
log.Trace("Not looking for peers, at peer limit")
time.Sleep(pollingPeriod)
exists := iterator.Next()
if !exists {
break
}
node := iterator.Node()
peerInfo, _, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Error("Could not convert to peer info")
continue
}
// Restrict dials if limit is applied.
if flags.MaxDialIsActive() {
var err error
wantedICount := math.Min(uint64(wantedCount), uint64(flags.Get().MaxConcurrentDials))
wantedCount, err = math.Int(wantedICount)
if err != nil {
log.WithError(err).Error("Could not get wanted count")
continue
// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
s.Peers().RandomizeBackOff(peerInfo.ID)
go func(info *peer.AddrInfo) {
if err := s.connectWithPeer(s.ctx, *info); err != nil {
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
}
}
wantedNodes := enode.ReadNodes(iterator, wantedCount)
wg := new(sync.WaitGroup)
for i := 0; i < len(wantedNodes); i++ {
node := wantedNodes[i]
peerInfo, _, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Error("Could not convert to peer info")
continue
}
if peerInfo == nil {
continue
}
// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
s.Peers().RandomizeBackOff(peerInfo.ID)
wg.Add(1)
go func(info *peer.AddrInfo) {
if err := s.connectWithPeer(s.ctx, *info); err != nil {
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
}
wg.Done()
}(peerInfo)
}
wg.Wait()
}(peerInfo)
}
}
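
One side of this hunk batches dials: compute how many peers are still wanted, cap that by the dial limit, read that many nodes from the iterator, and dial them concurrently behind a WaitGroup. A self-contained sketch of the capped fan-out (connectWithPeer replaced by a print):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	wanted, maxDials := 10, 4
	if wanted > maxDials {
		wanted = maxDials // cap concurrent dials, as the MaxDialIsActive branch does above
	}
	peers := []string{"a", "b", "c", "d", "e"}
	if len(peers) > wanted {
		peers = peers[:wanted]
	}
	var wg sync.WaitGroup
	for _, p := range peers {
		wg.Add(1)
		go func(id string) {
			defer wg.Done()
			fmt.Println("dialing", id) // stand-in for connectWithPeer
		}(p)
	}
	wg.Wait() // wait for the whole batch before looking for more nodes
}
```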
@@ -178,9 +140,9 @@ func (s *Service) createListener(
// by default we will listen to all interfaces.
var bindIP net.IP
switch udpVersionFromIP(ipAddr) {
case udp4:
case "udp4":
bindIP = net.IPv4zero
case udp6:
case "udp6":
bindIP = net.IPv6zero
default:
return nil, errors.New("invalid ip provided")
@@ -198,10 +160,10 @@ func (s *Service) createListener(
IP: bindIP,
Port: int(s.cfg.UDPPort),
}
// Listen to all network interfaces
// for both ip protocols.
conn, err := net.ListenUDP("udp", udpAddr)
networkVersion := "udp"
conn, err := net.ListenUDP(networkVersion, udpAddr)
if err != nil {
return nil, errors.Wrap(err, "could not listen to UDP")
}
@@ -211,86 +173,24 @@ func (s *Service) createListener(
ipAddr,
int(s.cfg.UDPPort),
int(s.cfg.TCPPort),
int(s.cfg.QUICPort),
)
if err != nil {
return nil, errors.Wrap(err, "could not create local node")
}
bootNodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddrs))
for _, addr := range s.cfg.Discv5BootStrapAddrs {
bootNode, err := enode.Parse(enode.ValidSchemes, addr)
if err != nil {
return nil, errors.Wrap(err, "could not bootstrap addr")
}
bootNodes = append(bootNodes, bootNode)
}
dv5Cfg := discover.Config{
PrivateKey: privKey,
Bootnodes: bootNodes,
}
listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
if err != nil {
return nil, errors.Wrap(err, "could not listen to discV5")
}
return listener, nil
}
func (s *Service) createLocalNode(
privKey *ecdsa.PrivateKey,
ipAddr net.IP,
udpPort, tcpPort, quicPort int,
) (*enode.LocalNode, error) {
db, err := enode.OpenDB("")
if err != nil {
return nil, errors.Wrap(err, "could not open node's peer database")
}
localNode := enode.NewLocalNode(db, privKey)
ipEntry := enr.IP(ipAddr)
localNode.Set(ipEntry)
udpEntry := enr.UDP(udpPort)
localNode.Set(udpEntry)
tcpEntry := enr.TCP(tcpPort)
localNode.Set(tcpEntry)
if features.Get().EnableQUIC {
quicEntry := quicProtocol(quicPort)
localNode.Set(quicEntry)
}
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)
localNode, err = addForkEntry(localNode, s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
}
localNode = initializeAttSubnets(localNode)
localNode = initializeSyncCommSubnets(localNode)
if s.cfg != nil && s.cfg.HostAddress != "" {
if s.cfg.HostAddress != "" {
hostIP := net.ParseIP(s.cfg.HostAddress)
if hostIP.To4() == nil && hostIP.To16() == nil {
return nil, errors.Errorf("invalid host address: %s", s.cfg.HostAddress)
log.Errorf("Invalid host address given: %s", hostIP.String())
} else {
localNode.SetFallbackIP(hostIP)
localNode.SetStaticIP(hostIP)
}
}
if s.cfg != nil && s.cfg.HostDNS != "" {
if s.cfg.HostDNS != "" {
host := s.cfg.HostDNS
ips, err := net.LookupIP(host)
if err != nil {
return nil, errors.Wrapf(err, "could not resolve host address: %s", host)
return nil, errors.Wrap(err, "could not resolve host address")
}
if len(ips) > 0 {
// Use first IP returned from the
@@ -299,8 +199,51 @@ func (s *Service) createLocalNode(
localNode.SetFallbackIP(firstIP)
}
}
dv5Cfg := discover.Config{
PrivateKey: privKey,
}
dv5Cfg.Bootnodes = []*enode.Node{}
for _, addr := range s.cfg.Discv5BootStrapAddr {
bootNode, err := enode.Parse(enode.ValidSchemes, addr)
if err != nil {
return nil, errors.Wrap(err, "could not bootstrap addr")
}
dv5Cfg.Bootnodes = append(dv5Cfg.Bootnodes, bootNode)
}
return localNode, nil
listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
if err != nil {
return nil, errors.Wrap(err, "could not listen to discV5")
}
return listener, nil
}
func (s *Service) createLocalNode(
privKey *ecdsa.PrivateKey,
ipAddr net.IP,
udpPort, tcpPort int,
) (*enode.LocalNode, error) {
db, err := enode.OpenDB("")
if err != nil {
return nil, errors.Wrap(err, "could not open node's peer database")
}
localNode := enode.NewLocalNode(db, privKey)
ipEntry := enr.IP(ipAddr)
udpEntry := enr.UDP(udpPort)
tcpEntry := enr.TCP(tcpPort)
localNode.Set(ipEntry)
localNode.Set(udpEntry)
localNode.Set(tcpEntry)
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)
localNode, err = addForkEntry(localNode, s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
}
localNode = initializeAttSubnets(localNode)
return initializeSyncCommSubnets(localNode), nil
}
func (s *Service) startDiscoveryV5(
@@ -319,68 +262,58 @@ func (s *Service) startDiscoveryV5(
// filterPeer validates each node that we retrieve from our dht. We
// try to ascertain that the peer can be a valid protocol peer.
// Validity Conditions:
// 1. Peer has a valid IP and a (QUIC and/or TCP) port set in their enr.
// 2. Peer hasn't been marked as 'bad'.
// 3. Peer is not currently active or connected.
// 4. Peer is ready to receive incoming connections.
// 5. Peer's fork digest in their ENR matches that of
// 1. The local node is still actively looking for peers to
// connect to.
// 2. Peer has a valid IP and TCP port set in their enr.
// 3. Peer hasn't been marked as 'bad'
// 4. Peer is not currently active or connected.
// 5. Peer is ready to receive incoming connections.
// 6. Peer's fork digest in their ENR matches that of
// our localnodes.
func (s *Service) filterPeer(node *enode.Node) bool {
// Ignore nil node entries passed in.
if node == nil {
return false
}
// Ignore nodes with no IP address stored.
// ignore nodes with no ip address stored.
if node.IP() == nil {
return false
}
peerData, multiAddrs, err := convertToAddrInfo(node)
// do not dial nodes with their tcp ports not set
if err := node.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil {
if !enr.IsNotFound(err) {
log.WithError(err).Debug("Could not retrieve tcp port")
}
return false
}
peerData, multiAddr, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Debug("Could not convert to peer data")
return false
}
if peerData == nil || len(multiAddrs) == 0 {
return false
}
// Ignore bad nodes.
if s.peers.IsBad(peerData.ID) {
return false
}
// Ignore nodes that are already active.
if s.peers.IsActive(peerData.ID) {
return false
}
// Ignore nodes that are already connected.
if s.host.Network().Connectedness(peerData.ID) == network.Connected {
return false
}
// Ignore nodes that are not ready to receive incoming connections.
if !s.peers.IsReadyToDial(peerData.ID) {
return false
}
// Ignore nodes that don't match our fork digest.
nodeENR := node.Record()
// Decide whether or not to connect to peer that does not
// match the proper fork ENR data with our local node.
if s.genesisValidatorsRoot != nil {
if err := s.compareForkENR(nodeENR); err != nil {
log.WithError(err).Trace("Fork ENR mismatches between peer and local node")
return false
}
}
// If the peer has 2 multiaddrs, favor the QUIC address, which is in first position.
multiAddr := multiAddrs[0]
// Add peer to peer handler.
s.peers.Add(nodeENR, peerData.ID, multiAddr, network.DirUnknown)
return true
}
@@ -405,17 +338,6 @@ func (s *Service) isPeerAtLimit(inbound bool) bool {
return activePeers >= maxPeers || numOfConns >= maxPeers
}
func (s *Service) wantedPeerDials() int {
maxPeers := int(s.cfg.MaxPeers)
activePeers := len(s.Peers().Active())
wantedCount := 0
if maxPeers > activePeers {
wantedCount = maxPeers - activePeers
}
return wantedCount
}
// PeersFromStringAddrs converts peer raw ENRs into multiaddrs for p2p.
func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
var allAddrs []ma.Multiaddr
@@ -432,16 +354,16 @@ func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
if err != nil {
return nil, errors.Wrapf(err, "Could not get enode from string")
}
nodeAddrs, err := retrieveMultiAddrsFromNode(enodeAddr)
addr, err := convertToSingleMultiAddr(enodeAddr)
if err != nil {
return nil, errors.Wrapf(err, "Could not get multiaddr")
}
allAddrs = append(allAddrs, nodeAddrs...)
allAddrs = append(allAddrs, addr)
}
return allAddrs, nil
}
func ParseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
func parseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
discv5Nodes, _ = parseGenericAddrs(addrs)
if len(discv5Nodes) == 0 {
log.Warn("No bootstrap addresses supplied")
@@ -471,139 +393,45 @@ func parseGenericAddrs(addrs []string) (enodeString, multiAddrString []string) {
}
func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
// Expect each node to have a TCP and a QUIC address.
multiAddrs := make([]ma.Multiaddr, 0, 2*len(nodes))
var multiAddrs []ma.Multiaddr
for _, node := range nodes {
// Skip nodes with no ip address stored.
// ignore nodes with no ip address stored
if node.IP() == nil {
continue
}
// Get up to two multiaddrs (TCP and QUIC) for each node.
nodeMultiAddrs, err := retrieveMultiAddrsFromNode(node)
multiAddr, err := convertToSingleMultiAddr(node)
if err != nil {
log.WithError(err).Errorf("Could not convert to multiAddr node %s", node)
log.WithError(err).Error("Could not convert to multiAddr")
continue
}
multiAddrs = append(multiAddrs, nodeMultiAddrs...)
multiAddrs = append(multiAddrs, multiAddr)
}
return multiAddrs
}
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
multiAddrs, err := retrieveMultiAddrsFromNode(node)
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, ma.Multiaddr, error) {
multiAddr, err := convertToSingleMultiAddr(node)
if err != nil {
return nil, nil, err
}
if len(multiAddrs) == 0 {
return nil, nil, nil
}
infos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
info, err := peer.AddrInfoFromP2pAddr(multiAddr)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert to peer info: %v", multiAddrs)
return nil, nil, err
}
if len(infos) != 1 {
return nil, nil, errors.Errorf("infos contains %v elements, expected exactly 1", len(infos))
}
return &infos[0], multiAddrs, nil
return info, multiAddr, nil
}
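A hedged sketch of the consumer side (s.host and ctx are assumed from the surrounding service code; host.Connect is the standard libp2p dial entry point):

func (s *Service) exampleDial(ctx context.Context, node *enode.Node) {
	info, _, err := convertToAddrInfo(node)
	if err != nil || info == nil {
		return
	}
	if err := s.host.Connect(ctx, *info); err != nil {
		log.WithError(err).Debug("Could not connect to peer")
	}
}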
// retrieveMultiAddrsFromNode converts an enode.Node to a list of multiaddrs.
// If the node has both a QUIC and a TCP port set in its ENR, the
// multiaddr corresponding to the QUIC port is added first, followed
// by the multiaddr corresponding to the TCP port.
func retrieveMultiAddrsFromNode(node *enode.Node) ([]ma.Multiaddr, error) {
multiaddrs := make([]ma.Multiaddr, 0, 2)
// Retrieve the node public key.
func convertToSingleMultiAddr(node *enode.Node) (ma.Multiaddr, error) {
pubkey := node.Pubkey()
assertedKey, err := ecdsaprysm.ConvertToInterfacePubkey(pubkey)
if err != nil {
return nil, errors.Wrap(err, "could not get pubkey")
}
// Compute the node ID from the public key.
id, err := peer.IDFromPublicKey(assertedKey)
if err != nil {
return nil, errors.Wrap(err, "could not get peer id")
}
if features.Get().EnableQUIC {
// If the QUIC entry is present in the ENR, build the corresponding multiaddress.
port, ok, err := getPort(node, quic)
if err != nil {
return nil, errors.Wrap(err, "could not get QUIC port")
}
if ok {
addr, err := multiAddressBuilderWithID(node.IP(), quic, port, id)
if err != nil {
return nil, errors.Wrap(err, "could not build QUIC address")
}
multiaddrs = append(multiaddrs, addr)
}
}
// If the TCP entry is present in the ENR, build the corresponding multiaddress.
port, ok, err := getPort(node, tcp)
if err != nil {
return nil, errors.Wrap(err, "could not get TCP port")
}
if ok {
addr, err := multiAddressBuilderWithID(node.IP(), tcp, port, id)
if err != nil {
return nil, errors.Wrap(err, "could not build TCP address")
}
multiaddrs = append(multiaddrs, addr)
}
return multiaddrs, nil
}
// getPort retrieves the port for a given node and protocol, along with a
// boolean indicating whether the port was found, and an error.
func getPort(node *enode.Node, protocol internetProtocol) (uint, bool, error) {
var (
port uint
err error
)
switch protocol {
case tcp:
var entry enr.TCP
err = node.Load(&entry)
port = uint(entry)
case udp:
var entry enr.UDP
err = node.Load(&entry)
port = uint(entry)
case quic:
var entry quicProtocol
err = node.Load(&entry)
port = uint(entry)
default:
return 0, false, errors.Errorf("invalid protocol: %v", protocol)
}
if enr.IsNotFound(err) {
return port, false, nil
}
if err != nil {
return 0, false, errors.Wrap(err, "could not get port")
}
return port, true, nil
return multiAddressBuilderWithID(node.IP().String(), "tcp", uint(node.TCP()), id)
}
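For intuition, a hedged sketch of the address shapes these helpers produce for a node at 1.2.3.4 advertising TCP 9000 and QUIC 9001, QUIC first (values and peer ID illustrative, reusing the go-multiaddr package already imported here):

func exampleNodeMultiaddrs() ([]ma.Multiaddr, error) {
	raw := []string{
		"/ip4/1.2.3.4/udp/9001/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
		"/ip4/1.2.3.4/tcp/9000/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
	}
	addrs := make([]ma.Multiaddr, 0, len(raw))
	for _, s := range raw {
		addr, err := ma.NewMultiaddr(s)
		if err != nil {
			return nil, err
		}
		addrs = append(addrs, addr)
	}
	return addrs, nil
}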
func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
@@ -621,14 +449,14 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
var ip4 enr.IPv4
var ip6 enr.IPv6
if node.Load(&ip4) == nil {
address, ipErr := multiAddressBuilderWithID(net.IP(ip4), udp, uint(node.UDP()), id)
address, ipErr := multiAddressBuilderWithID(net.IP(ip4).String(), "udp", uint(node.UDP()), id)
if ipErr != nil {
return nil, errors.Wrap(ipErr, "could not build IPv4 address")
}
addresses = append(addresses, address)
}
if node.Load(&ip6) == nil {
address, ipErr := multiAddressBuilderWithID(net.IP(ip6), udp, uint(node.UDP()), id)
address, ipErr := multiAddressBuilderWithID(net.IP(ip6).String(), "udp", uint(node.UDP()), id)
if ipErr != nil {
return nil, errors.Wrap(ipErr, "could not build IPv6 address")
}
@@ -655,9 +483,9 @@ func multiAddrFromString(address string) (ma.Multiaddr, error) {
return ma.NewMultiaddr(address)
}
func udpVersionFromIP(ipAddr net.IP) string {
func udpVersionFromIP(ipAddr net.IP) string {
if ipAddr.To4() != nil {
return udp4
return "udp4"
}
return udp6
return "udp6"
}


@@ -42,6 +42,10 @@ import (
var discoveryWaitTime = 1 * time.Second
func init() {
rand.Seed(time.Now().Unix())
}
func createAddrAndPrivKey(t *testing.T) (net.IP, *ecdsa.PrivateKey) {
ip, err := prysmNetwork.ExternalIPv4()
require.NoError(t, err, "Could not get ip")
@@ -99,8 +103,8 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
for i := 1; i <= 5; i++ {
port = 3000 + i
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
Discv5BootStrapAddr: []string{bootNode.String()},
UDPPort: uint(port),
}
ipAddr, pkey := createAddrAndPrivKey(t)
s = &Service{
@@ -130,107 +134,6 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
}
}
func TestCreateLocalNode(t *testing.T) {
testCases := []struct {
name string
cfg *Config
expectedError bool
}{
{
name: "valid config",
cfg: nil,
expectedError: false,
},
{
name: "invalid host address",
cfg: &Config{HostAddress: "invalid"},
expectedError: true,
},
{
name: "valid host address",
cfg: &Config{HostAddress: "192.168.0.1"},
expectedError: false,
},
{
name: "invalid host DNS",
cfg: &Config{HostDNS: "invalid"},
expectedError: true,
},
{
name: "valid host DNS",
cfg: &Config{HostDNS: "www.google.com"},
expectedError: false,
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
// Define ports.
const (
udpPort = 2000
tcpPort = 3000
quicPort = 3000
)
// Create a private key.
address, privKey := createAddrAndPrivKey(t)
// Create a service.
service := &Service{
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: tt.cfg,
}
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
if tt.expectedError {
require.NotNil(t, err)
return
}
require.NoError(t, err)
expectedAddress := address
if tt.cfg != nil && tt.cfg.HostAddress != "" {
expectedAddress = net.ParseIP(tt.cfg.HostAddress)
}
// Check IP.
// IP is not checked in the case of DNS, since it can resolve to different IPs.
if tt.cfg == nil || tt.cfg.HostDNS == "" {
ip := new(net.IP)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("ip", ip)))
require.Equal(t, true, ip.Equal(expectedAddress))
require.Equal(t, true, localNode.Node().IP().Equal(expectedAddress))
}
// Check UDP.
udp := new(uint16)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("udp", udp)))
require.Equal(t, udpPort, localNode.Node().UDP())
// Check TCP.
tcp := new(uint16)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry("tcp", tcp)))
require.Equal(t, tcpPort, localNode.Node().TCP())
// Check fork is set.
fork := new([]byte)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(eth2ENRKey, fork)))
require.NotEmpty(t, *fork)
// Check att subnets.
attSubnets := new([]byte)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(attSubnetEnrKey, attSubnets)))
require.DeepSSZEqual(t, []byte{0, 0, 0, 0, 0, 0, 0, 0}, *attSubnets)
// Check sync committees subnets.
syncSubnets := new([]byte)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, syncSubnets)))
require.DeepSSZEqual(t, []byte{0}, *syncSubnets)
})
}
}
func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
addr := net.ParseIP("invalidIP")
_, pkey := createAddrAndPrivKey(t)
@@ -238,7 +141,7 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
}
node, err := s.createLocalNode(pkey, addr, 0, 0, 0)
node, err := s.createLocalNode(pkey, addr, 0, 0)
require.NoError(t, err)
multiAddr := convertToMultiAddr([]*enode.Node{node.Node()})
assert.Equal(t, 0, len(multiAddr), "Invalid ip address converted successfully")
@@ -249,9 +152,8 @@ func TestMultiAddrConversion_OK(t *testing.T) {
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
cfg: &Config{
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
TCPPort: 0,
UDPPort: 0,
},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
@@ -317,7 +219,7 @@ func TestHostIsResolved(t *testing.T) {
// As defined in RFC 2606, example.org is a
// reserved example domain name.
exampleHost := "example.org"
exampleIP := "93.184.215.14"
exampleIP := "93.184.216.34"
s := &Service{
cfg: &Config{
@@ -408,12 +310,12 @@ func TestMultipleDiscoveryAddresses(t *testing.T) {
}
func TestCorrectUDPVersion(t *testing.T) {
assert.Equal(t, udp4, udpVersionFromIP(net.IPv4zero), "incorrect network version")
assert.Equal(t, udp6, udpVersionFromIP(net.IPv6zero), "incorrect network version")
assert.Equal(t, udp4, udpVersionFromIP(net.IP{200, 20, 12, 255}), "incorrect network version")
assert.Equal(t, udp6, udpVersionFromIP(net.IP{22, 23, 24, 251, 17, 18, 0, 0, 0, 0, 12, 14, 212, 213, 16, 22}), "incorrect network version")
assert.Equal(t, "udp4", udpVersionFromIP(net.IPv4zero), "incorrect network version")
assert.Equal(t, "udp6", udpVersionFromIP(net.IPv6zero), "incorrect network version")
assert.Equal(t, "udp4", udpVersionFromIP(net.IP{200, 20, 12, 255}), "incorrect network version")
assert.Equal(t, "udp6", udpVersionFromIP(net.IP{22, 23, 24, 251, 17, 18, 0, 0, 0, 0, 12, 14, 212, 213, 16, 22}), "incorrect network version")
// v4 in v6
assert.Equal(t, udp4, udpVersionFromIP(net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 212, 213, 16, 22}), "incorrect network version")
assert.Equal(t, "udp4", udpVersionFromIP(net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 212, 213, 16, 22}), "incorrect network version")
}
// addPeer is a helper to add a peer with a given connection state.


@@ -28,8 +28,7 @@ import (
)
func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
const port = 2000
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
@@ -47,14 +46,14 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
Discv5BootStrapAddr: []string{bootNode.String()},
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
}
var listeners []*discover.UDPv5
for i := 1; i <= 5; i++ {
port := 3000 + i
port = 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
@@ -99,14 +98,13 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
var addrs []ma.Multiaddr
addrs := make([]ma.Multiaddr, 0)
for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
for _, n := range nodes {
if s.filterPeer(n) {
addr, err := convertToSingleMultiAddr(n)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
addrs = append(addrs, addr)
}
}
@@ -116,11 +114,10 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
}
func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
const port = 2000
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
logrus.SetLevel(logrus.TraceLevel)
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
@@ -135,13 +132,13 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
Discv5BootStrapAddr: []string{bootNode.String()},
UDPPort: uint(port),
}
var listeners []*discover.UDPv5
for i := 1; i <= 5; i++ {
port := 3000 + i
port = 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
@@ -191,13 +188,13 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
addrs := make([]ma.Multiaddr, 0, len(nodes))
var addrs []ma.Multiaddr
for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
for _, n := range nodes {
if s.filterPeer(n) {
addr, err := convertToSingleMultiAddr(n)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
addrs = append(addrs, addr)
}
}
if len(addrs) == 0 {


@@ -20,7 +20,7 @@ self=%s
%d peers
%v
`,
s.cfg.Discv5BootStrapAddrs,
s.cfg.BootstrapNodeAddr,
s.selfAddresses(),
len(s.host.Network().Peers()),
formatPeers(s.host), // Must be last. Writes one entry per row.


@@ -2,14 +2,10 @@ package p2p
import (
"context"
"runtime"
"time"
"github.com/ethereum/go-ethereum/p2p/enode"
)
const backOffCounter = 50
// filterNodes wraps an iterator such that Next only returns nodes for which
// the 'check' function returns true. This custom implementation also
// checks for context deadlines so that in the event the parent context has
@@ -28,21 +24,13 @@ type filterIter struct {
// Next looks up the next valid node according to our
// filter criteria.
func (f *filterIter) Next() bool {
lookupCounter := 0
for f.Iterator.Next() {
// Do not excessively perform lookups if we constantly receive non-viable peers.
if lookupCounter > backOffCounter {
lookupCounter = 0
runtime.Gosched()
time.Sleep(30 * time.Second)
}
if f.Context.Err() != nil {
return false
}
if f.check(f.Node()) {
return true
}
lookupCounter++
}
return false
}
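For context, a sketch of how such an iterator is typically driven (RandomNodes comes from go-ethereum's discv5 listener; the filterNodes signature is assumed from the comment above):

func (s *Service) exampleIterate(ctx context.Context) {
	iterator := s.dv5Listener.RandomNodes()
	iterator = filterNodes(ctx, iterator, s.filterPeer)
	defer iterator.Close()
	for iterator.Next() {
		node := iterator.Node() // only nodes that passed filterPeer
		_ = node                // convert to multiaddr and dial ...
	}
}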


@@ -1,7 +1,6 @@
package p2p
import (
"net"
"strconv"
"strings"
@@ -13,32 +12,32 @@ import (
var log = logrus.WithField("prefix", "p2p")
func logIPAddr(id peer.ID, addrs ...ma.Multiaddr) {
var correctAddr ma.Multiaddr
for _, addr := range addrs {
if !(strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/")) {
continue
if strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/") {
correctAddr = addr
break
}
}
if correctAddr != nil {
log.WithField(
"multiAddr",
addr.String()+"/p2p/"+id.String(),
correctAddr.String()+"/p2p/"+id.String(),
).Info("Node started p2p server")
}
}
func logExternalIPAddr(id peer.ID, addr string, tcpPort, quicPort uint) {
func logExternalIPAddr(id peer.ID, addr string, port uint) {
if addr != "" {
multiAddrs, err := MultiAddressBuilder(net.ParseIP(addr), tcpPort, quicPort)
multiAddr, err := MultiAddressBuilder(addr, port)
if err != nil {
log.WithError(err).Error("Could not create multiaddress")
return
}
for _, multiAddr := range multiAddrs {
log.WithField(
"multiAddr",
multiAddr.String()+"/p2p/"+id.String(),
).Info("Node started external p2p server")
}
log.WithField(
"multiAddr",
multiAddr.String()+"/p2p/"+id.String(),
).Info("Node started external p2p server")
}
}


@@ -4,129 +4,90 @@ import (
"crypto/ecdsa"
"fmt"
"net"
"time"
"github.com/libp2p/go-libp2p"
mplex "github.com/libp2p/go-libp2p-mplex"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/security/noise"
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
gomplex "github.com/libp2p/go-mplex"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/features"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
type internetProtocol string
const (
udp = "udp"
tcp = "tcp"
quic = "quic"
)
// MultiAddressBuilder takes in an ip address string and port to produce a go multiaddr format.
func MultiAddressBuilder(ip net.IP, tcpPort, quicPort uint) ([]ma.Multiaddr, error) {
ipType, err := extractIpType(ip)
if err != nil {
return nil, errors.Wrap(err, "unable to determine IP type")
func MultiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
}
// Example: /ip4/1.2.3.4/tcp/5678
multiaddrStr := fmt.Sprintf("/%s/%s/tcp/%d", ipType, ip, tcpPort)
multiAddrTCP, err := ma.NewMultiaddr(multiaddrStr)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce TCP multiaddr format from %s:%d", ip, tcpPort)
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
}
multiaddrs := []ma.Multiaddr{multiAddrTCP}
if features.Get().EnableQUIC {
// Example: /ip4/1.2.3.4/udp/5678/quic-v1
multiAddrQUIC, err := ma.NewMultiaddr(fmt.Sprintf("/%s/%s/udp/%d/quic-v1", ipType, ip, quicPort))
if err != nil {
return nil, errors.Wrapf(err, "cannot produce QUIC multiaddr format from %s:%d", ip, tcpPort)
}
multiaddrs = append(multiaddrs, multiAddrQUIC)
}
return multiaddrs, nil
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port))
}
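A quick usage sketch against the single-address variant (IP and port illustrative):

func exampleListenAddr() {
	addr, err := MultiAddressBuilder("192.168.1.5", 13000) // illustrative IP/port
	if err != nil {
		log.WithError(err).Fatal("Could not build listen address")
	}
	log.Infof("Listening on %s", addr.String()) // /ip4/192.168.1.5/tcp/13000
}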
// buildOptions for the libp2p host.
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Option, error) {
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Option {
cfg := s.cfg
multiaddrs, err := MultiAddressBuilder(ip, cfg.TCPPort, cfg.QUICPort)
listen, err := MultiAddressBuilder(ip.String(), cfg.TCPPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip, cfg.TCPPort)
log.WithError(err).Fatal("Failed to p2p listen")
}
if cfg.LocalIP != "" {
localIP := net.ParseIP(cfg.LocalIP)
if localIP == nil {
return nil, errors.Wrapf(err, "invalid local ip provided: %s:%d", cfg.LocalIP, cfg.TCPPort)
if net.ParseIP(cfg.LocalIP) == nil {
log.Fatalf("Invalid local ip provided: %s", cfg.LocalIP)
}
multiaddrs, err = MultiAddressBuilder(localIP, cfg.TCPPort, cfg.QUICPort)
listen, err = MultiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", cfg.LocalIP, cfg.TCPPort)
log.WithError(err).Fatal("Failed to p2p listen")
}
}
ifaceKey, err := ecdsaprysm.ConvertToInterfacePrivkey(priKey)
if err != nil {
return nil, errors.Wrap(err, "cannot convert private key to interface private key. (Private key not displayed in logs for security reasons)")
log.WithError(err).Fatal("Failed to retrieve private key")
}
id, err := peer.IDFromPublicKey(ifaceKey.GetPublic())
if err != nil {
return nil, errors.Wrapf(err, "cannot get ID from public key: %s", ifaceKey.GetPublic().Type().String())
log.WithError(err).Fatal("Failed to retrieve peer id")
}
log.Infof("Running node with peer id of %s ", id.String())
options := []libp2p.Option{
privKeyOption(priKey),
libp2p.ListenAddrs(multiaddrs...),
libp2p.ListenAddrs(listen),
libp2p.UserAgent(version.BuildData()),
libp2p.ConnectionGater(s),
libp2p.Transport(libp2ptcp.NewTCPTransport),
libp2p.Transport(tcp.NewTCPTransport),
libp2p.DefaultMuxers,
libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
libp2p.Security(noise.ID, noise.New),
libp2p.Ping(false), // Disable Ping Service.
}
if features.Get().EnableQUIC {
options = append(options, libp2p.Transport(libp2pquic.NewTransport))
}
options = append(options, libp2p.Security(noise.ID, noise.New))
if cfg.EnableUPnP {
options = append(options, libp2p.NATPortMap()) // Allow to use UPnP
}
if cfg.RelayNodeAddr != "" {
options = append(options, libp2p.AddrsFactory(withRelayAddrs(cfg.RelayNodeAddr)))
} else {
// Disable relay if it has not been set.
options = append(options, libp2p.DisableRelay())
}
if cfg.HostAddress != "" {
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
externalMultiaddrs, err := MultiAddressBuilder(net.ParseIP(cfg.HostAddress), cfg.TCPPort, cfg.QUICPort)
external, err := MultiAddressBuilder(cfg.HostAddress, cfg.TCPPort)
if err != nil {
log.WithError(err).Error("Unable to create external multiaddress")
} else {
addrs = append(addrs, externalMultiaddrs...)
addrs = append(addrs, external)
}
return addrs
}))
}
if cfg.HostDNS != "" {
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
external, err := ma.NewMultiaddr(fmt.Sprintf("/dns4/%s/tcp/%d", cfg.HostDNS, cfg.TCPPort))
@@ -138,51 +99,26 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
return addrs
}))
}
// Disable Ping Service.
options = append(options, libp2p.Ping(false))
if features.Get().DisableResourceManager {
options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
}
return options, nil
return options
}
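Either variant ultimately feeds libp2p.New; a minimal sketch for the error-returning version (field names assumed from the surrounding service code):

func (s *Service) exampleStartHost(ip net.IP, privKey *ecdsa.PrivateKey) error {
	opts, err := s.buildOptions(ip, privKey)
	if err != nil {
		return errors.Wrap(err, "could not build libp2p options")
	}
	h, err := libp2p.New(opts...)
	if err != nil {
		return errors.Wrap(err, "could not create libp2p host")
	}
	s.host = h
	return nil
}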
func extractIpType(ip net.IP) (string, error) {
if ip.To4() != nil {
return "ip4", nil
func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
}
if ip.To16() != nil {
return "ip6", nil
if id.String() == "" {
return nil, errors.New("empty peer id given")
}
return "", errors.Errorf("provided IP address is neither IPv4 nor IPv6: %s", ip)
}
func multiAddressBuilderWithID(ip net.IP, protocol internetProtocol, port uint, id peer.ID) (ma.Multiaddr, error) {
var multiaddrStr string
if id == "" {
return nil, errors.Errorf("empty peer id given: %s", id)
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))
}
ipType, err := extractIpType(ip)
if err != nil {
return nil, errors.Wrap(err, "unable to determine IP type")
}
switch protocol {
case udp, tcp:
// Example with UDP: /ip4/1.2.3.4/udp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
// Example with TCP: /ip4/1.2.3.4/tcp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
multiaddrStr = fmt.Sprintf("/%s/%s/%s/%d/p2p/%s", ipType, ip, protocol, port, id)
case quic:
// Example: /ip4/1.2.3.4/udp/5678/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
multiaddrStr = fmt.Sprintf("/%s/%s/udp/%d/quic-v1/p2p/%s", ipType, ip, port, id)
default:
return nil, errors.Errorf("unsupported protocol: %s", protocol)
}
return ma.NewMultiaddr(multiaddrStr)
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))
}
// Adds a private key to the libp2p option if the option was provided.
@@ -198,8 +134,3 @@ func privKeyOption(privkey *ecdsa.PrivateKey) libp2p.Option {
return cfg.Apply(libp2p.Identity(ifaceKey))
}
}
// Configures stream timeouts on mplex.
func configureMplex() {
gomplex.ResetStreamTimeout = 5 * time.Second
}

Some files were not shown because too many files have changed in this diff.