Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 13:58:09 -05:00

Compare commits: 34 commits, deneb-reba...deneb-inte
Commits in this compare (SHA1):
41b7331ffc, b0fc368185, 777ca06b78, 4d520460e0, 27d8b1c358, 17500dcfca, 0452fd02e8, 09d761e1ab,
9a741c52d1, baed8da9c0, 33410a0ec1, cda1797d4e, e7f6048b8c, 418959565f, 8229f3eb84, 4098d098aa,
46c72798c7, a5474200de, a85b4445fc, 751dd847b8, aeb7a45864, e952fd802b, b511eef848, 7aa043892b,
36be057a11, 049e608c75, 4541598850, 8c08854dd0, d2ff995eb2, 56a0315dde, 634133fedc, c1c1b7ecfa,
9a4670ec64, a664a07303
@@ -1 +1 @@
6.2.1
6.3.2

42  .github/workflows/fuzz.yml  vendored  Normal file
@@ -0,0 +1,42 @@
name: "fuzz"
on:
  workflow_dispatch:
  schedule:
    - cron: "0 12 * * *"

permissions:
  contents: write
  pull-requests: write

jobs:
  list:
    runs-on: ubuntu-latest
    timeout-minutes: 10
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v4
        with:
          go-version: '1.20'
      - id: list
        uses: shogo82148/actions-go-fuzz/list@v0
    outputs:
      fuzz-tests: ${{steps.list.outputs.fuzz-tests}}

  fuzz:
    runs-on: ubuntu-latest
    timeout-minutes: 360
    needs: list
    strategy:
      fail-fast: false
      matrix:
        include: ${{fromJson(needs.list.outputs.fuzz-tests)}}
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v4
        with:
          go-version: '1.20'
      - uses: shogo82148/actions-go-fuzz/run@v0
        with:
          packages: ${{ matrix.package }}
          fuzz-regexp: ${{ matrix.func }}
          fuzz-time: "20m"
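The workflow above appears to enumerate native Go fuzz targets with shogo82148/actions-go-fuzz and run each one for 20 minutes. As a hedged sketch of the kind of target it would pick up (package and function names below are hypothetical, not taken from this diff):

package fuzzexample

import "testing"

// FuzzRoundTrip (in a *_test.go file) is a minimal native Go fuzz target
// of the shape the list/run actions discover: an exported Fuzz* function
// taking *testing.F. The only property exercised here is "never panic on
// arbitrary input".
func FuzzRoundTrip(f *testing.F) {
    f.Add([]byte{0x01, 0x02}) // seed corpus entry
    f.Fuzz(func(t *testing.T, data []byte) {
        if len(data) > 1<<20 {
            t.Skip("oversized input")
        }
        _ = data
    })
}

Locally, roughly the same budget as the workflow's fuzz-time can be applied with: go test -run='^$' -fuzz=FuzzRoundTrip -fuzztime=20m ./some/pkg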
@@ -133,8 +133,8 @@ nogo(
        # nogo checks that fail with coverage enabled.
        ":coverage_enabled": [],
        "//conditions:default": [
            "@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
            "@org_golang_x_tools//go/analysis/passes/composite:go_default_library",
            "@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
        ],
    }),
)
45  WORKSPACE

@@ -67,10 +67,10 @@ bazel_skylib_workspace()

http_archive(
    name = "bazel_gazelle",
    sha256 = "5982e5463f171da99e3bdaeff8c0f48283a7a5f396ec5282910b9e8a49c0dd7e",
    sha256 = "29d5dafc2a5582995488c6735115d1d366fcd6a0fc2e2a153f02988706349825",
    urls = [
        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.25.0/bazel-gazelle-v0.25.0.tar.gz",
        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.25.0/bazel-gazelle-v0.25.0.tar.gz",
        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
    ],
)

@@ -94,10 +94,10 @@ http_archive(
        # Expose internals of go_test for custom build transitions.
        "//third_party:io_bazel_rules_go_test.patch",
    ],
    sha256 = "6b65cb7917b4d1709f9410ffe00ecf3e160edf674b78c54a894471320862184f",
    sha256 = "bfc5ce70b9d1634ae54f4e7b495657a18a04e0d596785f672d35d5f505ab491a",
    urls = [
        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
        "https://github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
        "https://github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
    ],
)

@@ -172,7 +172,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()

go_register_toolchains(
    go_version = "1.20.6",
    go_version = "1.20.7",
    nogo = "@//:nogo",
)

@@ -312,13 +312,6 @@ filegroup(
    url = "https://github.com/eth-clients/eth2-networks/archive/7b4897888cebef23801540236f73123e21774954.tar.gz",
)

http_archive(
    name = "com_github_bazelbuild_buildtools",
    sha256 = "7a182df18df1debabd9e36ae07c8edfa1378b8424a04561b674d933b965372b3",
    strip_prefix = "buildtools-f2aed9ee205d62d45c55cfabbfd26342f8526862",
    url = "https://github.com/bazelbuild/buildtools/archive/f2aed9ee205d62d45c55cfabbfd26342f8526862.zip",
)

http_archive(
    name = "com_google_protobuf",
    sha256 = "4e176116949be52b0408dfd24f8925d1eb674a781ae242a75296b17a1c721395",

@@ -332,22 +325,6 @@ http_archive(
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""

# External dependencies

http_archive(
    name = "prysm_web_ui",
    build_file_content = """
filegroup(
    name = "site",
    srcs = glob(["**/*"]),
    visibility = ["//visibility:public"],
)
""",
    sha256 = "5006614c33e358699b4e072c649cd4c3866f7d41a691449d5156f6c6e07a4c60",
    urls = [
        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.3/prysm-web-ui.tar.gz",
    ],
)

load("//:deps.bzl", "prysm_deps")

# gazelle:repository_macro deps.bzl%prysm_deps

@@ -379,10 +356,6 @@ load(

_cc_image_repos()

load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")

go_embed_data_dependencies()

load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")

gometalinter_dependencies()

@@ -391,10 +364,6 @@ load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")

gazelle_dependencies()

load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")

buildifier_dependencies()

load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")

protobuf_deps()
@@ -5,6 +5,7 @@ import (
    "fmt"
    "path"

    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
    base "github.com/prysmaticlabs/prysm/v4/api/client"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"

@@ -109,8 +110,8 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
    log.
        WithField("block_slot", b.Block().Slot()).
        WithField("state_slot", s.Slot()).
        WithField("state_root", sr).
        WithField("block_root", br).
        WithField("state_root", hexutil.Encode(sr[:])).
        WithField("block_root", hexutil.Encode(br[:])).
        Info("Downloaded checkpoint sync state and block.")
    return &OriginData{
        st: s,
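The second hunk switches the state_root and block_root log fields from raw [32]byte values to hexutil.Encode. A small standalone sketch (not code from this change) of what that does to the output:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
    var root [32]byte
    root[0], root[1] = 0xde, 0xad

    // A raw [32]byte prints as a decimal byte list, which is awkward to
    // compare against explorers; hexutil.Encode yields the familiar
    // 0x-prefixed hex string.
    fmt.Println(root)
    fmt.Println(hexutil.Encode(root[:]))
}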
@@ -182,7 +182,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
// This gets called to update canonical root mapping. It does not save head block
// root in DB. With the inception of initial-sync-cache-state flag, it uses finalized
// check point as anchors to resume sync therefore head is no longer needed to be saved on per slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, r [32]byte, hs state.BeaconState, optimistic bool) error {
    if err := blocks.BeaconBlockIsNil(b); err != nil {
        return err
    }

@@ -198,7 +198,7 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedB
    if err != nil {
        return err
    }
    if err := s.setHeadInitialSync(r, bCp, hs); err != nil {
    if err := s.setHeadInitialSync(r, bCp, hs, optimistic); err != nil {
        return errors.Wrap(err, "could not set head")
    }
    return nil

@@ -227,7 +227,7 @@ func (s *Service) setHead(newHead *head) error {
// This sets head view object which is used to track the head slot, root, block and state. The method
// assumes that state being passed into the method will not be modified by any other alternate
// caller which holds the state's reference.
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState, optimistic bool) error {
    s.headLock.Lock()
    defer s.headLock.Unlock()

@@ -237,9 +237,10 @@ func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySig
        return err
    }
    s.head = &head{
        root:  root,
        block: bCp,
        state: state,
        root:       root,
        block:      bCp,
        state:      state,
        optimistic: optimistic,
    }
    return nil
}
@@ -97,7 +97,7 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn

    optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(blockRoot)
    if err != nil {
        log.WithError(err).Error("Could not check if block is optimistic")
        log.WithError(err).Debug("Could not check if block is optimistic")
        optimistic = true
    }

@@ -329,7 +329,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
    if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
        return err
    }
    return s.saveHeadNoDB(ctx, lastB, lastBR, preState)
    return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}

func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
@@ -62,7 +62,7 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
        return err
    }
    if !bytes.Equal(a.Data.Target.Root, r) {
        return errors.New("FFG and LMD votes are not consistent")
        return fmt.Errorf("FFG and LMD votes are not consistent, block root: %#x, target root: %#x, canonical target root: %#x", a.Data.BeaconBlockRoot, a.Data.Target.Root, r)
    }
    return nil
}
@@ -185,16 +185,18 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
        return err
    }

    lastBR := blkRoots[len(blkRoots)-1]
    optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(lastBR)
    if err != nil {
        lastSlot := blocks[len(blocks)-1].Block().Slot()
        log.WithError(err).Errorf("Could not check if block is optimistic, Root: %#x, Slot: %d", lastBR, lastSlot)
        optimistic = true
    }
    for i, b := range blocks {
        blockCopy, err := b.Copy()
        if err != nil {
            return err
        }
        optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(blkRoots[i])
        if err != nil {
            log.WithError(err).Error("Could not check if block is optimistic")
            optimistic = true
        }
        // Send notification of the processed block to the state feed.
        s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
            Type: statefeed.BlockProcessed,
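The hunk above hoists the fork-choice optimism check out of the per-block loop: it is now evaluated once against the batch's last root, with optimistic falling back to true if the lookup fails. A hedged, self-contained sketch of that hoisting pattern with toy types (nothing here is Prysm's actual API):

package main

import (
    "errors"
    "fmt"
)

// isOptimistic stands in for a fork-choice lookup that may fail.
func isOptimistic(root [32]byte) (bool, error) {
    if root == ([32]byte{}) {
        return false, errors.New("unknown root")
    }
    return false, nil
}

func main() {
    roots := [][32]byte{{0x01}, {0x02}, {0x03}}

    // Check once for the last root of the batch instead of once per block.
    optimistic, err := isOptimistic(roots[len(roots)-1])
    if err != nil {
        optimistic = true // assume optimistic when the lookup fails
    }
    for i := range roots {
        fmt.Printf("block %d processed, optimistic=%v\n", i, optimistic)
    }
}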
@@ -357,7 +357,7 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
    require.NoError(t, s.cfg.StateGen.SaveState(ctx, r, newState))
    wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
    require.NoError(t, err)
    require.NoError(t, s.saveHeadNoDB(ctx, wsb, r, newState))
    require.NoError(t, s.saveHeadNoDB(ctx, wsb, r, newState, false))

    newB, err := s.cfg.BeaconDB.HeadBlock(ctx)
    require.NoError(t, err)
@@ -50,6 +50,8 @@ func ProcessVoluntaryExits(
    beaconState state.BeaconState,
    exits []*ethpb.SignedVoluntaryExit,
) (state.BeaconState, error) {
    maxExitEpoch, churn := v.ValidatorsMaxExitEpochAndChurn(beaconState)
    var exitEpoch primitives.Epoch
    for idx, exit := range exits {
        if exit == nil || exit.Exit == nil {
            return nil, errors.New("nil voluntary exit in block body")

@@ -61,8 +63,15 @@ func ProcessVoluntaryExits(
        if err := VerifyExitAndSignature(val, beaconState.Slot(), beaconState.Fork(), exit, beaconState.GenesisValidatorsRoot()); err != nil {
            return nil, errors.Wrapf(err, "could not verify exit %d", idx)
        }
        beaconState, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex)
        if err != nil {
        beaconState, exitEpoch, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, maxExitEpoch, churn)
        if err == nil {
            if exitEpoch > maxExitEpoch {
                maxExitEpoch = exitEpoch
                churn = 1
            } else if exitEpoch == maxExitEpoch {
                churn++
            }
        } else if !errors.Is(err, v.ValidatorAlreadyExitedErr) {
            return nil, err
        }
    }
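The reworked loop computes the exit-queue state once up front via ValidatorsMaxExitEpochAndChurn and then keeps maxExitEpoch and churn current from each InitiateValidatorExit result, rather than rescanning the validator set for every exit. The counting rule on its own, as a hedged standalone sketch (plain integers, not beacon-state types):

package main

import "fmt"

func main() {
    // Exit epochs returned for successive exits in one block body.
    exitEpochs := []uint64{7, 7, 8, 8, 8}

    var maxExitEpoch uint64 = 7 // seeded from the pre-state scan
    var churn uint64 = 2        // validators already queued at epoch 7

    for _, e := range exitEpochs {
        if e > maxExitEpoch {
            maxExitEpoch = e // a later epoch resets the per-epoch count
            churn = 1
        } else if e == maxExitEpoch {
            churn++
        }
    }
    fmt.Println(maxExitEpoch, churn) // prints: 8 3
}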
@@ -110,8 +110,11 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
        isActive := helpers.IsActiveValidator(validator, currentEpoch)
        belowEjectionBalance := validator.EffectiveBalance <= ejectionBal
        if isActive && belowEjectionBalance {
            state, err = validators.InitiateValidatorExit(ctx, state, primitives.ValidatorIndex(idx))
            if err != nil {
            // Here is fine to do a quadratic loop since this should
            // barely happen
            maxExitEpoch, churn := validators.ValidatorsMaxExitEpochAndChurn(state)
            state, _, err = validators.InitiateValidatorExit(ctx, state, primitives.ValidatorIndex(idx), maxExitEpoch, churn)
            if err != nil && !errors.Is(err, validators.ValidatorAlreadyExitedErr) {
                return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
            }
        }
@@ -15,7 +15,6 @@ go_library(
        "//beacon-chain/state:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//math:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
@@ -13,11 +13,34 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    mathutil "github.com/prysmaticlabs/prysm/v4/math"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/time/slots"
)

// ValidatorAlreadyExitedErr is an error raised when trying to process an exit of
// an already exited validator
var ValidatorAlreadyExitedErr = errors.New("validator already exited")

// ValidatorsMaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
// epoch and the number of them
func ValidatorsMaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
    farFutureEpoch := params.BeaconConfig().FarFutureEpoch
    err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
        e := val.ExitEpoch()
        if e != farFutureEpoch {
            if e > maxExitEpoch {
                maxExitEpoch = e
                churn = 1
            } else if e == maxExitEpoch {
                churn++
            }
        }
        return nil
    })
    _ = err
    return
}
// InitiateValidatorExit takes in validator index and updates
// validator with correct voluntary exit parameters.
//

@@ -42,73 +65,43 @@ import (
// # Set validator exit epoch and withdrawable epoch
// validator.exit_epoch = exit_queue_epoch
// validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) (state.BeaconState, error) {
func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex, exitQueueEpoch primitives.Epoch, churn uint64) (state.BeaconState, primitives.Epoch, error) {
    exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
    if exitableEpoch > exitQueueEpoch {
        exitQueueEpoch = exitableEpoch
        churn = 0
    }
    validator, err := s.ValidatorAtIndex(idx)
    if err != nil {
        return nil, err
        return nil, 0, err
    }
    if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
        return s, nil
    }
    var exitEpochs []primitives.Epoch
    err = s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
        if val.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
            exitEpochs = append(exitEpochs, val.ExitEpoch())
        }
        return nil
    })
    if err != nil {
        return nil, err
    }
    exitEpochs = append(exitEpochs, helpers.ActivationExitEpoch(time.CurrentEpoch(s)))

    // Obtain the exit queue epoch as the maximum number in the exit epochs array.
    exitQueueEpoch := primitives.Epoch(0)
    for _, i := range exitEpochs {
        if exitQueueEpoch < i {
            exitQueueEpoch = i
        }
    }

    // We use the exit queue churn to determine if we have passed a churn limit.
    exitQueueChurn := uint64(0)
    err = s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
        if val.ExitEpoch() == exitQueueEpoch {
            var mErr error
            exitQueueChurn, mErr = mathutil.Add64(exitQueueChurn, 1)
            if mErr != nil {
                return mErr
            }
        }
        return nil
    })
    if err != nil {
        return nil, err
        return s, validator.ExitEpoch, ValidatorAlreadyExitedErr
    }
    activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
    if err != nil {
        return nil, errors.Wrap(err, "could not get active validator count")
        return nil, 0, errors.Wrap(err, "could not get active validator count")
    }
    churn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
    currentChurn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
    if err != nil {
        return nil, errors.Wrap(err, "could not get churn limit")
        return nil, 0, errors.Wrap(err, "could not get churn limit")
    }

    if exitQueueChurn >= churn {
    if churn >= currentChurn {
        exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
        if err != nil {
            return nil, err
            return nil, 0, err
        }
    }
    validator.ExitEpoch = exitQueueEpoch
    validator.WithdrawableEpoch, err = exitQueueEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
    if err != nil {
        return nil, err
        return nil, 0, err
    }
    if err := s.UpdateValidatorAtIndex(idx, validator); err != nil {
        return nil, err
        return nil, 0, err
    }
    return s, nil
    return s, exitQueueEpoch, nil
}
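With the new signature the caller supplies the exit-queue state, and the returned epoch follows the churn-limit rule visible above: the queue epoch only advances when the accumulated churn has reached the current churn limit. A hedged worked example of just that rule in isolation (not the full function, and ignoring the clamping to the earliest exitable epoch):

package main

import "fmt"

// scheduleExit applies the queueing rule from the hunk above: advance the
// queue epoch by one when churn has hit the limit.
func scheduleExit(exitQueueEpoch, churn, churnLimit uint64) uint64 {
    if churn >= churnLimit {
        return exitQueueEpoch + 1
    }
    return exitQueueEpoch
}

func main() {
    // Churn already at the limit: the exit lands one epoch later.
    fmt.Println(scheduleExit(12, 4, 4)) // 13
    // Room left in the current epoch: the exit keeps the queue epoch.
    fmt.Println(scheduleExit(12, 1, 4)) // 12
}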
// SlashValidator slashes the malicious validator's balance and awards

@@ -144,8 +137,9 @@ func SlashValidator(
    slashedIdx primitives.ValidatorIndex,
    penaltyQuotient uint64,
    proposerRewardQuotient uint64) (state.BeaconState, error) {
    s, err := InitiateValidatorExit(ctx, s, slashedIdx)
    if err != nil {
    maxExitEpoch, churn := ValidatorsMaxExitEpochAndChurn(s)
    s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
    if err != nil && !errors.Is(err, ValidatorAlreadyExitedErr) {
        return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
    }
    currentEpoch := slots.ToEpoch(s.Slot())
@@ -48,8 +48,9 @@ func TestInitiateValidatorExit_AlreadyExited(t *testing.T) {
    }}
    state, err := state_native.InitializeFromProtoPhase0(base)
    require.NoError(t, err)
    newState, err := InitiateValidatorExit(context.Background(), state, 0)
    require.NoError(t, err)
    newState, epoch, err := InitiateValidatorExit(context.Background(), state, 0, 199, 1)
    require.ErrorIs(t, err, ValidatorAlreadyExitedErr)
    require.Equal(t, exitEpoch, epoch)
    v, err := newState.ValidatorAtIndex(0)
    require.NoError(t, err)
    assert.Equal(t, exitEpoch, v.ExitEpoch, "Already exited")

@@ -66,8 +67,9 @@ func TestInitiateValidatorExit_ProperExit(t *testing.T) {
    }}
    state, err := state_native.InitializeFromProtoPhase0(base)
    require.NoError(t, err)
    newState, err := InitiateValidatorExit(context.Background(), state, idx)
    newState, epoch, err := InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 1)
    require.NoError(t, err)
    require.Equal(t, exitedEpoch+2, epoch)
    v, err := newState.ValidatorAtIndex(idx)
    require.NoError(t, err)
    assert.Equal(t, exitedEpoch+2, v.ExitEpoch, "Exit epoch was not the highest")

@@ -85,8 +87,9 @@ func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) {
    }}
    state, err := state_native.InitializeFromProtoPhase0(base)
    require.NoError(t, err)
    newState, err := InitiateValidatorExit(context.Background(), state, idx)
    newState, epoch, err := InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 4)
    require.NoError(t, err)
    require.Equal(t, exitedEpoch+3, epoch)

    // Because of exit queue overflow,
    // validator who init exited has to wait one more epoch.

@@ -106,7 +109,7 @@ func TestInitiateValidatorExit_WithdrawalOverflows(t *testing.T) {
    }}
    state, err := state_native.InitializeFromProtoPhase0(base)
    require.NoError(t, err)
    _, err = InitiateValidatorExit(context.Background(), state, 1)
    _, _, err = InitiateValidatorExit(context.Background(), state, 1, params.BeaconConfig().FarFutureEpoch-1, 1)
    require.ErrorContains(t, "addition overflows", err)
}
@@ -337,3 +340,78 @@ func TestExitedValidatorIndices(t *testing.T) {
        assert.DeepEqual(t, tt.wanted, exitedIndices)
    }
}

func TestValidatorMaxExitEpochAndChurn(t *testing.T) {
    tests := []struct {
        state       *ethpb.BeaconState
        wantedEpoch primitives.Epoch
        wantedChurn uint64
    }{
        {
            state: &ethpb.BeaconState{
                Validators: []*ethpb.Validator{
                    {
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
                        ExitEpoch:         0,
                        WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
                    },
                    {
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
                        ExitEpoch:         0,
                        WithdrawableEpoch: 10,
                    },
                    {
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
                        ExitEpoch:         0,
                        WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
                    },
                },
            },
            wantedEpoch: 0,
            wantedChurn: 3,
        },
        {
            state: &ethpb.BeaconState{
                Validators: []*ethpb.Validator{
                    {
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
                        ExitEpoch:         params.BeaconConfig().FarFutureEpoch,
                        WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
                    },
                },
            },
            wantedEpoch: 0,
            wantedChurn: 0,
        },
        {
            state: &ethpb.BeaconState{
                Validators: []*ethpb.Validator{
                    {
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
                        ExitEpoch:         1,
                        WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
                    },
                    {
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
                        ExitEpoch:         0,
                        WithdrawableEpoch: 10,
                    },
                    {
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
                        ExitEpoch:         1,
                        WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
                    },
                },
            },
            wantedEpoch: 1,
            wantedChurn: 2,
        },
    }
    for _, tt := range tests {
        s, err := state_native.InitializeFromProtoPhase0(tt.state)
        require.NoError(t, err)
        epoch, churn := ValidatorsMaxExitEpochAndChurn(s)
        require.Equal(t, tt.wantedEpoch, epoch)
        require.Equal(t, tt.wantedChurn, churn)
    }
}
@@ -158,8 +158,7 @@ func TestConfigureNetwork_ConfigFile(t *testing.T) {
        return cmd.LoadFlagsFromConfig(cliCtx, comFlags)
    },
    Action: func(cliCtx *cli.Context) error {
        //TODO: https://github.com/urfave/cli/issues/1197 right now does not set flag
        require.Equal(t, false, cliCtx.IsSet(cmd.BootstrapNode.Name))
        require.Equal(t, true, cliCtx.IsSet(cmd.BootstrapNode.Name))

        require.Equal(t, strings.Join([]string{"node1", "node2"}, ","),
            strings.Join(cliCtx.StringSlice(cmd.BootstrapNode.Name), ","))
@@ -24,6 +24,7 @@ go_library(
        "//beacon-chain/operations/synccommittee:go_default_library",
        "//beacon-chain/operations/voluntaryexits:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/rpc/core:go_default_library",
        "//beacon-chain/rpc/eth/beacon:go_default_library",
        "//beacon-chain/rpc/eth/builder:go_default_library",
        "//beacon-chain/rpc/eth/debug:go_default_library",

@@ -16,7 +16,7 @@ go_library(
        "//api/gateway/apimiddleware:go_default_library",
        "//api/grpc:go_default_library",
        "//beacon-chain/rpc/eth/events:go_default_library",
        "//beacon-chain/rpc/eth/helpers:go_default_library",
        "//beacon-chain/rpc/eth/shared:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/eth/v2:go_default_library",
@@ -63,51 +63,6 @@ func wrapFeeRecipientsArray(
    return true, nil
}

// https://ethereum.github.io/beacon-APIs/#/Validator/registerValidator expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct.
func wrapSignedValidatorRegistrationsArray(
    endpoint *apimiddleware.Endpoint,
    _ http.ResponseWriter,
    req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
    if _, ok := endpoint.PostRequest.(*SignedValidatorRegistrationsRequestJson); !ok {
        return true, nil
    }
    registrations := make([]*SignedValidatorRegistrationJson, 0)
    if err := json.NewDecoder(req.Body).Decode(&registrations); err != nil {
        return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
    }
    j := &SignedValidatorRegistrationsRequestJson{Registrations: registrations}
    b, err := json.Marshal(j)
    if err != nil {
        return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
    }
    req.Body = io.NopCloser(bytes.NewReader(b))
    return true, nil
}

// https://ethereum.github.io/beacon-apis/#/Beacon/submitPoolAttestations expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapAttestationsArray(
    endpoint *apimiddleware.Endpoint,
    _ http.ResponseWriter,
    req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
    if _, ok := endpoint.PostRequest.(*SubmitAttestationRequestJson); ok {
        atts := make([]*AttestationJson, 0)
        if err := json.NewDecoder(req.Body).Decode(&atts); err != nil {
            return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
        }
        j := &SubmitAttestationRequestJson{Data: atts}
        b, err := json.Marshal(j)
        if err != nil {
            return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
        }
        req.Body = io.NopCloser(bytes.NewReader(b))
    }
    return true, nil
}

// Some endpoints e.g. https://ethereum.github.io/beacon-apis/#/Validator/getAttesterDuties expect posting a top-level array of validator indices.
// We make it more proto-friendly by wrapping it in a struct with an 'Index' field.
func wrapValidatorIndicesArray(

@@ -130,50 +85,6 @@ func wrapValidatorIndicesArray(
    return true, nil
}

// https://ethereum.github.io/beacon-apis/#/Validator/prepareBeaconCommitteeSubnet expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapBeaconCommitteeSubscriptionsArray(
    endpoint *apimiddleware.Endpoint,
    _ http.ResponseWriter,
    req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
    if _, ok := endpoint.PostRequest.(*SubmitBeaconCommitteeSubscriptionsRequestJson); ok {
        data := make([]*BeaconCommitteeSubscribeJson, 0)
        if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
            return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
        }
        j := &SubmitBeaconCommitteeSubscriptionsRequestJson{Data: data}
        b, err := json.Marshal(j)
        if err != nil {
            return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
        }
        req.Body = io.NopCloser(bytes.NewReader(b))
    }
    return true, nil
}

// https://ethereum.github.io/beacon-APIs/#/Validator/prepareSyncCommitteeSubnets expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSyncCommitteeSubscriptionsArray(
    endpoint *apimiddleware.Endpoint,
    _ http.ResponseWriter,
    req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
    if _, ok := endpoint.PostRequest.(*SubmitSyncCommitteeSubscriptionRequestJson); ok {
        data := make([]*SyncCommitteeSubscriptionJson, 0)
        if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
            return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
        }
        j := &SubmitSyncCommitteeSubscriptionRequestJson{Data: data}
        b, err := json.Marshal(j)
        if err != nil {
            return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
        }
        req.Body = io.NopCloser(bytes.NewReader(b))
    }
    return true, nil
}

// https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolSyncCommitteeSignatures expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSyncCommitteeSignaturesArray(
@@ -19,46 +19,6 @@ import (
    "github.com/prysmaticlabs/prysm/v4/time/slots"
)

func TestWrapAttestationArray(t *testing.T) {
    t.Run("ok", func(t *testing.T) {
        endpoint := &apimiddleware.Endpoint{
            PostRequest: &SubmitAttestationRequestJson{},
        }
        unwrappedAtts := []*AttestationJson{{AggregationBits: "1010"}}
        unwrappedAttsJson, err := json.Marshal(unwrappedAtts)
        require.NoError(t, err)

        var body bytes.Buffer
        _, err = body.Write(unwrappedAttsJson)
        require.NoError(t, err)
        request := httptest.NewRequest("POST", "http://foo.example", &body)

        runDefault, errJson := wrapAttestationsArray(endpoint, nil, request)
        require.Equal(t, true, errJson == nil)
        assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
        wrappedAtts := &SubmitAttestationRequestJson{}
        require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedAtts))
        require.Equal(t, 1, len(wrappedAtts.Data), "wrong number of wrapped items")
        assert.Equal(t, "1010", wrappedAtts.Data[0].AggregationBits)
    })

    t.Run("invalid_body", func(t *testing.T) {
        endpoint := &apimiddleware.Endpoint{
            PostRequest: &SubmitAttestationRequestJson{},
        }
        var body bytes.Buffer
        _, err := body.Write([]byte("invalid"))
        require.NoError(t, err)
        request := httptest.NewRequest("POST", "http://foo.example", &body)

        runDefault, errJson := wrapAttestationsArray(endpoint, nil, request)
        require.Equal(t, false, errJson == nil)
        assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
        assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
        assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
    })
}

func TestWrapValidatorIndicesArray(t *testing.T) {
    t.Run("ok", func(t *testing.T) {
        endpoint := &apimiddleware.Endpoint{

@@ -140,111 +100,6 @@ func TestWrapBLSChangesArray(t *testing.T) {
    })
}

func TestWrapBeaconCommitteeSubscriptionsArray(t *testing.T) {
    t.Run("ok", func(t *testing.T) {
        endpoint := &apimiddleware.Endpoint{
            PostRequest: &SubmitBeaconCommitteeSubscriptionsRequestJson{},
        }
        unwrappedSubs := []*BeaconCommitteeSubscribeJson{{
            ValidatorIndex:   "1",
            CommitteeIndex:   "1",
            CommitteesAtSlot: "1",
            Slot:             "1",
            IsAggregator:     true,
        }}
        unwrappedSubsJson, err := json.Marshal(unwrappedSubs)
        require.NoError(t, err)

        var body bytes.Buffer
        _, err = body.Write(unwrappedSubsJson)
        require.NoError(t, err)
        request := httptest.NewRequest("POST", "http://foo.example", &body)

        runDefault, errJson := wrapBeaconCommitteeSubscriptionsArray(endpoint, nil, request)
        require.Equal(t, true, errJson == nil)
        assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
        wrappedSubs := &SubmitBeaconCommitteeSubscriptionsRequestJson{}
        require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedSubs))
        require.Equal(t, 1, len(wrappedSubs.Data), "wrong number of wrapped items")
        assert.Equal(t, "1", wrappedSubs.Data[0].ValidatorIndex)
        assert.Equal(t, "1", wrappedSubs.Data[0].CommitteeIndex)
        assert.Equal(t, "1", wrappedSubs.Data[0].CommitteesAtSlot)
        assert.Equal(t, "1", wrappedSubs.Data[0].Slot)
        assert.Equal(t, true, wrappedSubs.Data[0].IsAggregator)
    })

    t.Run("invalid_body", func(t *testing.T) {
        endpoint := &apimiddleware.Endpoint{
            PostRequest: &SubmitBeaconCommitteeSubscriptionsRequestJson{},
        }
        var body bytes.Buffer
        _, err := body.Write([]byte("invalid"))
        require.NoError(t, err)
        request := httptest.NewRequest("POST", "http://foo.example", &body)

        runDefault, errJson := wrapBeaconCommitteeSubscriptionsArray(endpoint, nil, request)
        require.Equal(t, false, errJson == nil)
        assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
        assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
        assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
    })
}

func TestWrapSyncCommitteeSubscriptionsArray(t *testing.T) {
    t.Run("ok", func(t *testing.T) {
        endpoint := &apimiddleware.Endpoint{
            PostRequest: &SubmitSyncCommitteeSubscriptionRequestJson{},
        }
        unwrappedSubs := []*SyncCommitteeSubscriptionJson{
            {
                ValidatorIndex:       "1",
                SyncCommitteeIndices: []string{"1", "2"},
                UntilEpoch:           "1",
            },
            {
                ValidatorIndex:       "2",
                SyncCommitteeIndices: []string{"3", "4"},
                UntilEpoch:           "2",
            },
        }
        unwrappedSubsJson, err := json.Marshal(unwrappedSubs)
        require.NoError(t, err)

        var body bytes.Buffer
        _, err = body.Write(unwrappedSubsJson)
        require.NoError(t, err)
        request := httptest.NewRequest("POST", "http://foo.example", &body)

        runDefault, errJson := wrapSyncCommitteeSubscriptionsArray(endpoint, nil, request)
        require.Equal(t, true, errJson == nil)
        assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
        wrappedSubs := &SubmitSyncCommitteeSubscriptionRequestJson{}
        require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedSubs))
        require.Equal(t, 2, len(wrappedSubs.Data), "wrong number of wrapped items")
        assert.Equal(t, "1", wrappedSubs.Data[0].ValidatorIndex)
        require.Equal(t, 2, len(wrappedSubs.Data[0].SyncCommitteeIndices), "wrong number of committee indices")
        assert.Equal(t, "1", wrappedSubs.Data[0].SyncCommitteeIndices[0])
        assert.Equal(t, "2", wrappedSubs.Data[0].SyncCommitteeIndices[1])
        assert.Equal(t, "1", wrappedSubs.Data[0].UntilEpoch)
    })

    t.Run("invalid_body", func(t *testing.T) {
        endpoint := &apimiddleware.Endpoint{
            PostRequest: &SubmitSyncCommitteeSubscriptionRequestJson{},
        }
        var body bytes.Buffer
        _, err := body.Write([]byte("invalid"))
        require.NoError(t, err)
        request := httptest.NewRequest("POST", "http://foo.example", &body)

        runDefault, errJson := wrapSyncCommitteeSubscriptionsArray(endpoint, nil, request)
        require.Equal(t, false, errJson == nil)
        assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
        assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
        assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
    })
}

func TestWrapSyncCommitteeSignaturesArray(t *testing.T) {
    t.Run("ok", func(t *testing.T) {
        endpoint := &apimiddleware.Endpoint{
@@ -35,7 +35,6 @@ func (_ *BeaconEndpointFactory) Paths() []string {
        "/eth/v1/beacon/blocks/{block_id}/root",
        "/eth/v1/beacon/blocks/{block_id}/attestations",
        "/eth/v1/beacon/blinded_blocks/{block_id}",
        "/eth/v1/beacon/pool/attestations",
        "/eth/v1/beacon/pool/attester_slashings",
        "/eth/v1/beacon/pool/proposer_slashings",
        "/eth/v1/beacon/pool/voluntary_exits",

@@ -48,7 +47,6 @@ func (_ *BeaconEndpointFactory) Paths() []string {
        "/eth/v1/node/peers/{peer_id}",
        "/eth/v1/node/peer_count",
        "/eth/v1/node/version",
        "/eth/v1/node/syncing",
        "/eth/v1/node/health",
        "/eth/v1/debug/beacon/states/{state_id}",
        "/eth/v2/debug/beacon/states/{state_id}",

@@ -65,12 +63,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
        "/eth/v1/validator/blocks/{slot}",
        "/eth/v2/validator/blocks/{slot}",
        "/eth/v1/validator/blinded_blocks/{slot}",
        "/eth/v1/validator/attestation_data",
        "/eth/v1/validator/beacon_committee_subscriptions",
        "/eth/v1/validator/sync_committee_subscriptions",
        "/eth/v1/validator/sync_committee_contribution",
        "/eth/v1/validator/prepare_beacon_proposer",
        "/eth/v1/validator/register_validator",
        "/eth/v1/validator/liveness/{epoch}",
    }
}

@@ -143,14 +136,6 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
            OnPreSerializeMiddlewareResponseIntoJson: serializeBlindedBlock,
        }
        endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBlindedBeaconBlockSSZ}
    case "/eth/v1/beacon/pool/attestations":
        endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "committee_index"}}
        endpoint.GetResponse = &AttestationsPoolResponseJson{}
        endpoint.PostRequest = &SubmitAttestationRequestJson{}
        endpoint.Err = &IndexedVerificationFailureErrorJson{}
        endpoint.Hooks = apimiddleware.HookCollection{
            OnPreDeserializeRequestBodyIntoContainer: wrapAttestationsArray,
        }
    case "/eth/v1/beacon/pool/attester_slashings":
        endpoint.PostRequest = &AttesterSlashingJson{}
        endpoint.GetResponse = &AttesterSlashingsPoolResponseJson{}

@@ -187,8 +172,6 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
        endpoint.GetResponse = &PeerCountResponseJson{}
    case "/eth/v1/node/version":
        endpoint.GetResponse = &VersionResponseJson{}
    case "/eth/v1/node/syncing":
        endpoint.GetResponse = &SyncingResponseJson{}
    case "/eth/v1/node/health":
        // Use default endpoint
    case "/eth/v1/debug/beacon/states/{state_id}":

@@ -257,34 +240,11 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
            OnPreSerializeMiddlewareResponseIntoJson: serializeProducedBlindedBlock,
        }
        endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlindedBlockSSZ}
    case "/eth/v1/validator/attestation_data":
        endpoint.GetResponse = &ProduceAttestationDataResponseJson{}
        endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "committee_index"}}
    case "/eth/v1/validator/beacon_committee_subscriptions":
        endpoint.PostRequest = &SubmitBeaconCommitteeSubscriptionsRequestJson{}
        endpoint.Err = &NodeSyncDetailsErrorJson{}
        endpoint.Hooks = apimiddleware.HookCollection{
            OnPreDeserializeRequestBodyIntoContainer: wrapBeaconCommitteeSubscriptionsArray,
        }
    case "/eth/v1/validator/sync_committee_subscriptions":
        endpoint.PostRequest = &SubmitSyncCommitteeSubscriptionRequestJson{}
        endpoint.Err = &NodeSyncDetailsErrorJson{}
        endpoint.Hooks = apimiddleware.HookCollection{
            OnPreDeserializeRequestBodyIntoContainer: wrapSyncCommitteeSubscriptionsArray,
        }
    case "/eth/v1/validator/sync_committee_contribution":
        endpoint.GetResponse = &ProduceSyncCommitteeContributionResponseJson{}
        endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "subcommittee_index"}, {Name: "beacon_block_root", Hex: true}}
    case "/eth/v1/validator/prepare_beacon_proposer":
        endpoint.PostRequest = &FeeRecipientsRequestJSON{}
        endpoint.Hooks = apimiddleware.HookCollection{
            OnPreDeserializeRequestBodyIntoContainer: wrapFeeRecipientsArray,
        }
    case "/eth/v1/validator/register_validator":
        endpoint.PostRequest = &SignedValidatorRegistrationsRequestJson{}
        endpoint.Hooks = apimiddleware.HookCollection{
            OnPreDeserializeRequestBodyIntoContainer: wrapSignedValidatorRegistrationsArray,
        }
    case "/eth/v1/validator/liveness/{epoch}":
        endpoint.PostRequest = &ValidatorIndicesJson{}
        endpoint.PostResponse = &LivenessResponseJson{}
@@ -4,7 +4,7 @@ import (
    "strings"

    "github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
    ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
)

@@ -143,14 +143,6 @@ type BlockAttestationsResponseJson struct {
    Finalized bool `json:"finalized"`
}

type AttestationsPoolResponseJson struct {
    Data []*AttestationJson `json:"data"`
}

type SubmitAttestationRequestJson struct {
    Data []*AttestationJson `json:"data"`
}

type AttesterSlashingsPoolResponseJson struct {
    Data []*AttesterSlashingJson `json:"data"`
}

@@ -199,7 +191,7 @@ type VersionResponseJson struct {
}

type SyncingResponseJson struct {
    Data *helpers.SyncDetailsJson `json:"data"`
    Data *shared.SyncDetails `json:"data"`
}

type BeaconStateResponseJson struct {

@@ -268,18 +260,10 @@ type ProduceBlindedBlockResponseJson struct {
    Data *BlindedBeaconBlockContainerJson `json:"data"`
}

type ProduceAttestationDataResponseJson struct {
    Data *AttestationDataJson `json:"data"`
}

type AggregateAttestationResponseJson struct {
    Data *AttestationJson `json:"data"`
}

type SubmitBeaconCommitteeSubscriptionsRequestJson struct {
    Data []*BeaconCommitteeSubscribeJson `json:"data"`
}

type BeaconCommitteeSubscribeJson struct {
    ValidatorIndex string `json:"validator_index"`
    CommitteeIndex string `json:"committee_index"`

@@ -288,16 +272,6 @@ type BeaconCommitteeSubscribeJson struct {
    IsAggregator bool `json:"is_aggregator"`
}

type SubmitSyncCommitteeSubscriptionRequestJson struct {
    Data []*SyncCommitteeSubscriptionJson `json:"data"`
}

type SyncCommitteeSubscriptionJson struct {
    ValidatorIndex       string   `json:"validator_index"`
    SyncCommitteeIndices []string `json:"sync_committee_indices"`
    UntilEpoch           string   `json:"until_epoch"`
}

type ProduceSyncCommitteeContributionResponseJson struct {
    Data *SyncCommitteeContributionJson `json:"data"`
}

@@ -996,22 +970,6 @@ type SyncCommitteeContributionJson struct {
    Signature string `json:"signature" hex:"true"`
}

type ValidatorRegistrationJson struct {
    FeeRecipient string `json:"fee_recipient" hex:"true"`
    GasLimit     string `json:"gas_limit"`
    Timestamp    string `json:"timestamp"`
    Pubkey       string `json:"pubkey" hex:"true"`
}

type SignedValidatorRegistrationJson struct {
    Message   *ValidatorRegistrationJson `json:"message"`
    Signature string                     `json:"signature" hex:"true"`
}

type SignedValidatorRegistrationsRequestJson struct {
    Registrations []*SignedValidatorRegistrationJson `json:"registrations"`
}

type ForkChoiceNodeJson struct {
    Slot      string `json:"slot"`
    BlockRoot string `json:"block_root" hex:"true"`

@@ -1211,7 +1169,7 @@ type SingleIndexedVerificationFailureJson struct {

type NodeSyncDetailsErrorJson struct {
    apimiddleware.DefaultErrorJson
    SyncDetails helpers.SyncDetailsJson `json:"sync_details"`
    SyncDetails shared.SyncDetails `json:"sync_details"`
}

type EventErrorJson struct {
@@ -5,12 +5,14 @@ go_library(
    srcs = [
        "errors.go",
        "log.go",
        "service.go",
        "validator.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/feed:go_default_library",

@@ -20,12 +22,19 @@ go_library(
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/operations/synccommittee:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/sync:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/validator:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/rand:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_google_grpc//codes:go_default_library",
22  beacon-chain/rpc/core/service.go  Normal file

@@ -0,0 +1,22 @@
package core

import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
    opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)

type Service struct {
    HeadFetcher        blockchain.HeadFetcher
    GenesisTimeFetcher blockchain.TimeFetcher
    SyncChecker        sync.Checker
    Broadcaster        p2p.Broadcaster
    SyncCommitteePool  synccommittee.Pool
    OperationNotifier  opfeed.Notifier
    AttestationCache   *cache.AttestationCache
    StateGen           stategen.StateManager
}
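The new service.go is a plain dependency holder for the core package; the diff itself only defines the struct, so callers presumably populate it with their existing components. A hedged sketch of one way to wire it up (NewService is a hypothetical helper, not part of this change, and a direct struct literal works just as well):

package core

import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)

// NewService bundles a few of the dependencies declared on Service above.
// Field and type names come from the struct in the diff; everything else
// here is illustrative.
func NewService(head blockchain.HeadFetcher, genesis blockchain.TimeFetcher, checker sync.Checker, b p2p.Broadcaster) *Service {
    return &Service{
        HeadFetcher:        head,
        GenesisTimeFetcher: genesis,
        SyncChecker:        checker,
        Broadcaster:        b,
    }
}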
@@ -8,7 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
@@ -16,14 +16,17 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/rand"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
@@ -48,18 +51,21 @@ func (e *AggregateBroadcastFailedError) Error() string {
|
||||
|
||||
// ComputeValidatorPerformance reports the validator's latest balance along with other important metrics on
|
||||
// rewards and penalties throughout its lifecycle in the beacon chain.
|
||||
func ComputeValidatorPerformance(
|
||||
func (s *Service) ComputeValidatorPerformance(
|
||||
ctx context.Context,
|
||||
req *ethpb.ValidatorPerformanceRequest,
|
||||
headFetcher blockchain.HeadFetcher,
|
||||
currSlot primitives.Slot,
|
||||
) (*ethpb.ValidatorPerformanceResponse, *RpcError) {
|
||||
headState, err := headFetcher.HeadState(ctx)
|
||||
if s.SyncChecker.Syncing() {
|
||||
return nil, &RpcError{Reason: Unavailable, Err: errors.New("Syncing to latest head, not ready to respond")}
|
||||
}
|
||||
|
||||
headState, err := s.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Err: errors.Wrap(err, "could not get head state"), Reason: Internal}
|
||||
}
|
||||
currSlot := s.GenesisTimeFetcher.CurrentSlot()
|
||||
if currSlot > headState.Slot() {
|
||||
headRoot, err := headFetcher.HeadRoot(ctx)
|
||||
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Err: errors.Wrap(err, "could not get head root"), Reason: Internal}
|
||||
}
|
||||
@@ -200,21 +206,18 @@ func ComputeValidatorPerformance(
|
||||
|
||||
// SubmitSignedContributionAndProof is called by a sync committee aggregator
|
||||
// to submit signed contribution and proof object.
|
||||
func SubmitSignedContributionAndProof(
|
||||
func (s *Service) SubmitSignedContributionAndProof(
|
||||
ctx context.Context,
|
||||
s *ethpb.SignedContributionAndProof,
|
||||
broadcaster p2p.Broadcaster,
|
||||
pool synccommittee.Pool,
|
||||
notifier opfeed.Notifier,
|
||||
req *ethpb.SignedContributionAndProof,
|
||||
) *RpcError {
|
||||
errs, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
// Broadcasting and saving contribution into the pool in parallel. As one fail should not affect another.
errs.Go(func() error {
return broadcaster.Broadcast(ctx, s)
return s.Broadcaster.Broadcast(ctx, req)
})

if err := pool.SaveSyncCommitteeContribution(s.Message.Contribution); err != nil {
if err := s.SyncCommitteePool.SaveSyncCommitteeContribution(req.Message.Contribution); err != nil {
return &RpcError{Err: err, Reason: Internal}
}

@@ -224,10 +227,10 @@ func SubmitSignedContributionAndProof(
return &RpcError{Err: err, Reason: Internal}
}

notifier.OperationFeed().Send(&feed.Event{
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: opfeed.SyncCommitteeContributionReceived,
Data: &opfeed.SyncCommitteeContributionReceivedData{
Contribution: s,
Contribution: req,
},
})

@@ -235,11 +238,9 @@ func SubmitSignedContributionAndProof(
}

// SubmitSignedAggregateSelectionProof verifies given aggregate and proofs and publishes them on appropriate gossipsub topic.
func SubmitSignedAggregateSelectionProof(
func (s *Service) SubmitSignedAggregateSelectionProof(
ctx context.Context,
req *ethpb.SignedAggregateSubmitRequest,
genesisTime time.Time,
broadcaster p2p.Broadcaster,
) *RpcError {
if req.SignedAggregateAndProof == nil || req.SignedAggregateAndProof.Message == nil ||
req.SignedAggregateAndProof.Message.Aggregate == nil || req.SignedAggregateAndProof.Message.Aggregate.Data == nil {
@@ -253,11 +254,11 @@ func SubmitSignedAggregateSelectionProof(

// As a preventive measure, a beacon node shouldn't broadcast an attestation whose slot is out of range.
if err := helpers.ValidateAttestationTime(req.SignedAggregateAndProof.Message.Aggregate.Data.Slot,
genesisTime, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
s.GenesisTimeFetcher.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return &RpcError{Err: errors.New("attestation slot is no longer valid from current time"), Reason: BadRequest}
}

if err := broadcaster.Broadcast(ctx, req.SignedAggregateAndProof); err != nil {
if err := s.Broadcaster.Broadcast(ctx, req.SignedAggregateAndProof); err != nil {
return &RpcError{Err: &AggregateBroadcastFailedError{err: err}, Reason: Internal}
}

@@ -270,3 +271,188 @@ func SubmitSignedAggregateSelectionProof(

return nil
}

// AggregatedSigAndAggregationBits returns the aggregated signature and aggregation bits
// associated with a particular set of sync committee messages.
func (s *Service) AggregatedSigAndAggregationBits(
ctx context.Context,
req *ethpb.AggregatedSigAndAggregationBitsRequest) ([]byte, []byte, error) {
subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
sigs := make([][]byte, 0, subCommitteeSize)
bits := ethpb.NewSyncCommitteeAggregationBits()
for _, msg := range req.Msgs {
if bytes.Equal(req.BlockRoot, msg.BlockRoot) {
headSyncCommitteeIndices, err := s.HeadFetcher.HeadSyncCommitteeIndices(ctx, msg.ValidatorIndex, req.Slot)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get sync subcommittee index")
}
for _, index := range headSyncCommitteeIndices {
i := uint64(index)
subnetIndex := i / subCommitteeSize
indexMod := i % subCommitteeSize
if subnetIndex == req.SubnetId && !bits.BitAt(indexMod) {
bits.SetBitAt(indexMod, true)
sigs = append(sigs, msg.Signature)
}
}
}
}
aggregatedSig := make([]byte, 96)
aggregatedSig[0] = 0xC0
if len(sigs) != 0 {
uncompressedSigs, err := bls.MultipleSignaturesFromBytes(sigs)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not decompress signatures")
}
aggregatedSig = bls.AggregateSignatures(uncompressedSigs).Marshal()
}
return aggregatedSig, bits, nil
}
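
Note: a minimal sketch of the index arithmetic used in the hunk above, assuming mainnet-style parameters (512 sync committee members split across 4 subnets); the numbers are illustrative only and are not part of the diff.

package main

import "fmt"

func main() {
	// Assumed mainnet-style parameters: SyncCommitteeSize = 512, SyncCommitteeSubnetCount = 4.
	const subCommitteeSize = 512 / 4 // 128 members per subnet
	index := uint64(300)             // hypothetical position of a validator in the sync committee
	subnet := index / subCommitteeSize
	bit := index % subCommitteeSize
	fmt.Println(subnet, bit) // 2 44: the message flips bit 44 of subnet 2's aggregation bits
}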

// AssignValidatorToSubnet checks the status and pubkey of a particular validator
// to discern whether persistent subnets need to be registered for them.
func AssignValidatorToSubnet(pubkey []byte, status validator.ValidatorStatus) {
if status != validator.Active {
return
}
assignValidatorToSubnet(pubkey)
}

// AssignValidatorToSubnetProto checks the status and pubkey of a particular validator
// to discern whether persistent subnets need to be registered for them.
//
// It has a Proto suffix because the status is a protobuf type.
func AssignValidatorToSubnetProto(pubkey []byte, status ethpb.ValidatorStatus) {
if status != ethpb.ValidatorStatus_ACTIVE && status != ethpb.ValidatorStatus_EXITING {
return
}
assignValidatorToSubnet(pubkey)
}

func assignValidatorToSubnet(pubkey []byte) {
_, ok, expTime := cache.SubnetIDs.GetPersistentSubnets(pubkey)
if ok && expTime.After(prysmTime.Now()) {
return
}
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
var assignedIdxs []uint64
randGen := rand.NewGenerator()
for i := uint64(0); i < params.BeaconConfig().RandomSubnetsPerValidator; i++ {
assignedIdx := randGen.Intn(int(params.BeaconNetworkConfig().AttestationSubnetCount))
assignedIdxs = append(assignedIdxs, uint64(assignedIdx))
}

assignedDuration := uint64(randGen.Intn(int(params.BeaconConfig().EpochsPerRandomSubnetSubscription)))
assignedDuration += params.BeaconConfig().EpochsPerRandomSubnetSubscription

totalDuration := epochDuration * time.Duration(assignedDuration)
cache.SubnetIDs.AddPersistentCommittee(pubkey, assignedIdxs, totalDuration*time.Second)
}
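
Note: a rough illustration of the subscription window computed above, assuming mainnet-style values (12s slots, 32-slot epochs, EpochsPerRandomSubnetSubscription = 256); the exact values come from params.BeaconConfig and may differ.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed mainnet-style parameters; illustrative only.
	const (
		secondsPerSlot                    = 12
		slotsPerEpoch                     = 32
		epochsPerRandomSubnetSubscription = 256
	)
	epoch := slotsPerEpoch * secondsPerSlot * time.Second
	// assignedDuration above is drawn from [E, 2E), so the subscription lasts between:
	fmt.Println(epoch*epochsPerRandomSubnetSubscription, epoch*(2*epochsPerRandomSubnetSubscription-1))
	// 27h18m24s 54h30m24s
}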

// GetAttestationData requests that the beacon node produces attestation data for
// the requested committee index and slot based on the nodes current head.
func (s *Service) GetAttestationData(
ctx context.Context, req *ethpb.AttestationDataRequest,
) (*ethpb.AttestationData, *RpcError) {
if err := helpers.ValidateAttestationTime(
req.Slot,
s.GenesisTimeFetcher.GenesisTime(),
params.BeaconNetworkConfig().MaximumGossipClockDisparity,
); err != nil {
return nil, &RpcError{Reason: BadRequest, Err: errors.Errorf("invalid request: %v", err)}
}

res, err := s.AttestationCache.Get(ctx, req)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve data from attestation cache: %v", err)}
}
if res != nil {
res.CommitteeIndex = req.CommitteeIndex
return res, nil
}

if err := s.AttestationCache.MarkInProgress(req); err != nil {
if errors.Is(err, cache.ErrAlreadyInProgress) {
res, err := s.AttestationCache.Get(ctx, req)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve data from attestation cache: %v", err)}
}
if res == nil {
return nil, &RpcError{Reason: Internal, Err: errors.New("a request was in progress and resolved to nil")}
}
res.CommitteeIndex = req.CommitteeIndex
return res, nil
}
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not mark attestation as in-progress: %v", err)}
}
defer func() {
if err := s.AttestationCache.MarkNotInProgress(req); err != nil {
log.WithError(err).Error("could not mark attestation as not-in-progress")
}
}()

headState, err := s.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve head state: %v", err)}
}
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve head root: %v", err)}
}

// In the case that we receive an attestation request after a newer state/block has been processed.
if headState.Slot() > req.Slot {
headRoot, err = helpers.BlockRootAtSlot(headState, req.Slot)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not get historical head root: %v", err)}
}
headState, err = s.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(headRoot))
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not get historical head state: %v", err)}
}
}
if headState == nil || headState.IsNil() {
return nil, &RpcError{Reason: Internal, Err: errors.New("could not lookup parent state from head")}
}

if coreTime.CurrentEpoch(headState) < slots.ToEpoch(req.Slot) {
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, req.Slot)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not process slots up to %d: %v", req.Slot, err)}
}
}

targetEpoch := coreTime.CurrentEpoch(headState)
epochStartSlot, err := slots.EpochStart(targetEpoch)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not calculate epoch start: %v", err)}
}
var targetRoot []byte
if epochStartSlot == headState.Slot() {
targetRoot = headRoot
} else {
targetRoot, err = helpers.BlockRootAtSlot(headState, epochStartSlot)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not get target block for slot %d: %v", epochStartSlot, err)}
}
if bytesutil.ToBytes32(targetRoot) == params.BeaconConfig().ZeroHash {
targetRoot = headRoot
}
}

res = &ethpb.AttestationData{
Slot: req.Slot,
CommitteeIndex: req.CommitteeIndex,
BeaconBlockRoot: headRoot,
Source: headState.CurrentJustifiedCheckpoint(),
Target: &ethpb.Checkpoint{
Epoch: targetEpoch,
Root: targetRoot,
},
}

if err := s.AttestationCache.Put(ctx, req, res); err != nil {
log.WithError(err).Error("could not store attestation data in cache")
}
return res, nil
}
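
Note: the target checkpoint above is anchored at the first slot of the attestation's epoch; a tiny sketch of that arithmetic, assuming mainnet's 32 slots per epoch (illustrative only, not part of the diff).

package main

import "fmt"

func main() {
	const slotsPerEpoch = 32 // assumed mainnet value
	reqSlot := uint64(100)   // hypothetical attestation slot
	targetEpoch := reqSlot / slotsPerEpoch        // 3
	epochStartSlot := targetEpoch * slotsPerEpoch // 96: the target root is the block root at this slot,
	fmt.Println(targetEpoch, epochStartSlot)      // or the head root if the head has not moved past it
}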

@@ -7,6 +7,7 @@ go_library(
"blocks.go",
"config.go",
"handlers.go",
"handlers_pool.go",
"log.go",
"pool.go",
"server.go",
@@ -37,6 +38,7 @@ go_library(
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -49,6 +51,7 @@ go_library(
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
@@ -83,6 +86,7 @@ go_test(
"blinded_blocks_test.go",
"blocks_test.go",
"config_test.go",
"handlers_pool_test.go",
"handlers_test.go",
"init_test.go",
"pool_test.go",
@@ -109,6 +113,7 @@ go_test(
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits/mock:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
@@ -120,12 +125,14 @@ go_test(
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//network/forks:go_default_library",
"//network/http:go_default_library",
"//proto/eth/service:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
@@ -137,6 +144,7 @@ go_test(
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",

@@ -11,7 +11,7 @@ import (
"github.com/go-playground/validator/v10"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
@@ -38,7 +38,7 @@ const (
// a `SignedBeaconBlock`. The broadcast behaviour may be adjusted via the `broadcast_validation`
// query parameter.
func (bs *Server) PublishBlindedBlockV2(w http.ResponseWriter, r *http.Request) {
if ok := bs.checkSync(r.Context(), w); !ok {
if shared.IsSyncing(r.Context(), w, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher) {
return
}
isSSZ, err := http2.SszRequested(r)
@@ -302,7 +302,7 @@ func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
// successfully broadcast but failed integration. The broadcast behaviour may be adjusted via the
// `broadcast_validation` query parameter.
func (bs *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
if ok := bs.checkSync(r.Context(), w); !ok {
if shared.IsSyncing(r.Context(), w, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher) {
return
}
isSSZ, err := http2.SszRequested(r)
@@ -631,29 +631,3 @@ func (bs *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error
}
return nil
}

func (bs *Server) checkSync(ctx context.Context, w http.ResponseWriter) bool {
isSyncing, syncDetails, err := helpers.ValidateSyncHTTP(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: "Could not check if node is syncing: " + err.Error(),
Code: http.StatusInternalServerError,
}
http2.WriteError(w, errJson)
return false
}
if isSyncing {
msg := "Beacon node is currently syncing and not serving request on that endpoint"
details, err := json.Marshal(syncDetails)
if err == nil {
msg += " Details: " + string(details)
}
errJson := &http2.DefaultErrorJson{
Message: msg,
Code: http.StatusServiceUnavailable,
}
http2.WriteError(w, errJson)
return false
}
return true
}

170
beacon-chain/rpc/eth/beacon/handlers_pool.go
Normal file
@@ -0,0 +1,170 @@
package beacon

import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"

"github.com/go-playground/validator/v10"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
corehelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
)

// ListAttestations retrieves attestations known by the node but
// not necessarily incorporated into any block. Allows filtering by committee index or slot.
func (s *Server) ListAttestations(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "beacon.ListAttestations")
defer span.End()

ok, rawSlot, slot := shared.UintFromQuery(w, r, "slot")
if !ok {
return
}
ok, rawCommitteeIndex, committeeIndex := shared.UintFromQuery(w, r, "committee_index")
if !ok {
return
}

attestations := s.AttestationsPool.AggregatedAttestations()
unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
if err != nil {
http2.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
return
}
attestations = append(attestations, unaggAtts...)
isEmptyReq := rawSlot == "" && rawCommitteeIndex == ""
if isEmptyReq {
allAtts := make([]*shared.Attestation, len(attestations))
for i, att := range attestations {
allAtts[i] = shared.AttestationFromConsensus(att)
}
http2.WriteJson(w, &ListAttestationsResponse{Data: allAtts})
return
}

bothDefined := rawSlot != "" && rawCommitteeIndex != ""
filteredAtts := make([]*shared.Attestation, 0, len(attestations))
for _, att := range attestations {
committeeIndexMatch := rawCommitteeIndex != "" && att.Data.CommitteeIndex == primitives.CommitteeIndex(committeeIndex)
slotMatch := rawSlot != "" && att.Data.Slot == primitives.Slot(slot)
shouldAppend := (bothDefined && committeeIndexMatch && slotMatch) || (!bothDefined && (committeeIndexMatch || slotMatch))
if shouldAppend {
filteredAtts = append(filteredAtts, shared.AttestationFromConsensus(att))
}
}
http2.WriteJson(w, &ListAttestationsResponse{Data: filteredAtts})
}
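
Note: when both slot and committee_index are supplied, an attestation in the handler above must match both, while a single supplied parameter only needs to match on its own. A small mirror of that predicate (names are mine, not part of the handler):

package main

import "fmt"

// shouldAppend mirrors the filter in ListAttestations: the match flags are
// already false when the corresponding query parameter was not supplied.
func shouldAppend(bothDefined, committeeIndexMatch, slotMatch bool) bool {
	return (bothDefined && committeeIndexMatch && slotMatch) || (!bothDefined && (committeeIndexMatch || slotMatch))
}

func main() {
	fmt.Println(shouldAppend(true, true, false))  // false: both filters set, slot does not match
	fmt.Println(shouldAppend(false, false, true)) // true: only the slot filter set, and it matches
}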

// SubmitAttestations submits an Attestation object to node. If the attestation passes all validation
// constraints, node MUST publish the attestation on an appropriate subnet.
func (s *Server) SubmitAttestations(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttestations")
defer span.End()

if r.Body == http.NoBody {
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
var req SubmitAttestationsRequest
if err := json.NewDecoder(r.Body).Decode(&req.Data); err != nil {
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
if len(req.Data) == 0 {
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
validate := validator.New()
if err := validate.Struct(req); err != nil {
http2.HandleError(w, err.Error(), http.StatusBadRequest)
return
}

var validAttestations []*ethpbalpha.Attestation
var attFailures []*shared.IndexedVerificationFailure
for i, sourceAtt := range req.Data {
att, err := sourceAtt.ToConsensus()
if err != nil {
attFailures = append(attFailures, &shared.IndexedVerificationFailure{
Index: i,
Message: "Could not convert request attestation to consensus attestation: " + err.Error(),
})
continue
}
if _, err = bls.SignatureFromBytes(att.Signature); err != nil {
attFailures = append(attFailures, &shared.IndexedVerificationFailure{
Index: i,
Message: "Incorrect attestation signature: " + err.Error(),
})
continue
}

// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
// of a received unaggregated attestation.
// Note we can't send for aggregated att because we don't have selection proof.
if !corehelpers.IsAggregated(att) {
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{
Attestation: att,
},
})
}

validAttestations = append(validAttestations, att)
}

failedBroadcasts := make([]string, 0)
for i, att := range validAttestations {
// Determine subnet to broadcast attestation to
wantedEpoch := slots.ToEpoch(att.Data.Slot)
vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
if err != nil {
http2.HandleError(w, "Could not get head validator indices: "+err.Error(), http.StatusInternalServerError)
return
}
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.Data.CommitteeIndex, att.Data.Slot)

if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, att); err != nil {
failedBroadcasts = append(failedBroadcasts, strconv.Itoa(i))
log.WithError(err).Errorf("could not broadcast attestation at index %d", i)
}

if corehelpers.IsAggregated(att) {
if err = s.AttestationsPool.SaveAggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save aggregated att")
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save unaggregated att")
}
}
}
if len(failedBroadcasts) > 0 {
http2.HandleError(
w,
fmt.Sprintf("Attestations at index %s could not be broadcasted", strings.Join(failedBroadcasts, ", ")),
http.StatusInternalServerError,
)
return
}

if len(attFailures) > 0 {
failuresErr := &shared.IndexedVerificationFailureError{
Code: http.StatusBadRequest,
Message: "One or more attestations failed validation",
Failures: attFailures,
}
http2.WriteError(w, failuresErr)
}
}
375
beacon-chain/rpc/eth/beacon/handlers_pool_test.go
Normal file
@@ -0,0 +1,375 @@
package beacon

import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"strings"
"testing"

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/go-bitfield"
blockchainmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
p2pMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
ethpbv1alpha1 "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)

func TestListAttestations(t *testing.T) {
att1 := &ethpbv1alpha1.Attestation{
AggregationBits: []byte{1, 10},
Data: &ethpbv1alpha1.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature1"), 96),
}
att2 := &ethpbv1alpha1.Attestation{
AggregationBits: []byte{1, 10},
Data: &ethpbv1alpha1.AttestationData{
Slot: 1,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
}
att3 := &ethpbv1alpha1.Attestation{
AggregationBits: bitfield.NewBitlist(8),
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 2,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot3"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot3"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot3"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature3"), 96),
}
att4 := &ethpbv1alpha1.Attestation{
AggregationBits: bitfield.NewBitlist(8),
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot4"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot4"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot4"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature4"), 96),
}
s := &Server{
AttestationsPool: attestations.NewPool(),
}
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]*ethpbv1alpha1.Attestation{att1, att2}))
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]*ethpbv1alpha1.Attestation{att3, att4}))

t.Run("empty request", func(t *testing.T) {
url := "http://example.com"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.ListAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
assert.Equal(t, 4, len(resp.Data))
})
t.Run("slot request", func(t *testing.T) {
url := "http://example.com?slot=2"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.ListAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
assert.Equal(t, 2, len(resp.Data))
for _, a := range resp.Data {
assert.Equal(t, "2", a.Data.Slot)
}
})
t.Run("index request", func(t *testing.T) {
url := "http://example.com?committee_index=4"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.ListAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
assert.Equal(t, 2, len(resp.Data))
for _, a := range resp.Data {
assert.Equal(t, "4", a.Data.CommitteeIndex)
}
})
t.Run("both slot + index request", func(t *testing.T) {
url := "http://example.com?slot=2&committee_index=4"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.ListAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
assert.Equal(t, 1, len(resp.Data))
for _, a := range resp.Data {
assert.Equal(t, "2", a.Data.Slot)
assert.Equal(t, "4", a.Data.CommitteeIndex)
}
})
}

func TestServer_SubmitAttestations(t *testing.T) {
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()

params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
c.SlotsPerEpoch = 1
params.OverrideBeaconConfig(c)

_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validators := []*ethpbv1alpha1.Validator{
{
PublicKey: keys[0].PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = validators
state.Slot = 1
state.PreviousJustifiedCheckpoint = &ethpbv1alpha1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
}
return nil
})
require.NoError(t, err)
b := bitfield.NewBitlist(1)
b.SetBitAt(0, true)

chainService := &blockchainmock.ChainService{State: bs}
s := &Server{
HeadFetcher: chainService,
ChainInfoFetcher: chainService,
OperationNotifier: &blockchainmock.MockOperationNotifier{},
}

t.Run("single", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
s.Broadcaster = broadcaster
s.AttestationsPool = attestations.NewPool()

var body bytes.Buffer
_, err := body.WriteString(singleAtt)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SubmitAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled)
assert.Equal(t, 1, len(broadcaster.BroadcastAttestations))
assert.Equal(t, "0x03", hexutil.Encode(broadcaster.BroadcastAttestations[0].AggregationBits))
assert.Equal(t, "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15", hexutil.Encode(broadcaster.BroadcastAttestations[0].Signature))
assert.Equal(t, primitives.Slot(0), broadcaster.BroadcastAttestations[0].Data.Slot)
assert.Equal(t, primitives.CommitteeIndex(0), broadcaster.BroadcastAttestations[0].Data.CommitteeIndex)
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].Data.BeaconBlockRoot))
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].Data.Source.Root))
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].Data.Source.Epoch)
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].Data.Target.Root))
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].Data.Target.Epoch)
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("multiple", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
s.Broadcaster = broadcaster
s.AttestationsPool = attestations.NewPool()

var body bytes.Buffer
_, err := body.WriteString(multipleAtts)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SubmitAttestations(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled)
assert.Equal(t, 2, len(broadcaster.BroadcastAttestations))
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("no body", func(t *testing.T) {
request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SubmitAttestations(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("empty", func(t *testing.T) {
var body bytes.Buffer
_, err := body.WriteString("[]")
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SubmitAttestations(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("invalid", func(t *testing.T) {
var body bytes.Buffer
_, err := body.WriteString(invalidAtt)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SubmitAttestations(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &apimiddleware.IndexedVerificationFailureErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
require.Equal(t, 1, len(e.Failures))
assert.Equal(t, true, strings.Contains(e.Failures[0].Message, "Incorrect attestation signature"))
})
}

const (
singleAtt = `[
{
"aggregation_bits": "0x03",
"signature": "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15",
"data": {
"slot": "0",
"index": "0",
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"source": {
"epoch": "0",
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
},
"target": {
"epoch": "0",
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
}
}
}
]`
multipleAtts = `[
{
"aggregation_bits": "0x03",
"signature": "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15",
"data": {
"slot": "0",
"index": "0",
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"source": {
"epoch": "0",
"root": "0x736f75726365726f6f7431000000000000000000000000000000000000000000"
},
"target": {
"epoch": "0",
"root": "0x746172676574726f6f7431000000000000000000000000000000000000000000"
}
}
},
{
"aggregation_bits": "0x03",
"signature": "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15",
"data": {
"slot": "0",
"index": "0",
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"source": {
"epoch": "0",
"root": "0x736f75726365726f6f7431000000000000000000000000000000000000000000"
},
"target": {
"epoch": "0",
"root": "0x746172676574726f6f7432000000000000000000000000000000000000000000"
}
}
}
]`
// signature is invalid
invalidAtt = `[
{
"aggregation_bits": "0x03",
"signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"data": {
"slot": "0",
"index": "0",
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"source": {
"epoch": "0",
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
},
"target": {
"epoch": "0",
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
}
}
}
]`
)

@@ -8,11 +8,9 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
corehelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
@@ -27,121 +25,6 @@ import (

const broadcastBLSChangesRateLimit = 128

// ListPoolAttestations retrieves attestations known by the node but
// not necessarily incorporated into any block. Allows filtering by committee index or slot.
func (bs *Server) ListPoolAttestations(ctx context.Context, req *ethpbv1.AttestationsPoolRequest) (*ethpbv1.AttestationsPoolResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.ListPoolAttestations")
defer span.End()

attestations := bs.AttestationsPool.AggregatedAttestations()
unaggAtts, err := bs.AttestationsPool.UnaggregatedAttestations()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get unaggregated attestations: %v", err)
}
attestations = append(attestations, unaggAtts...)
isEmptyReq := req.Slot == nil && req.CommitteeIndex == nil
if isEmptyReq {
allAtts := make([]*ethpbv1.Attestation, len(attestations))
for i, att := range attestations {
allAtts[i] = migration.V1Alpha1AttestationToV1(att)
}
return &ethpbv1.AttestationsPoolResponse{Data: allAtts}, nil
}

filteredAtts := make([]*ethpbv1.Attestation, 0, len(attestations))
for _, att := range attestations {
bothDefined := req.Slot != nil && req.CommitteeIndex != nil
committeeIndexMatch := req.CommitteeIndex != nil && att.Data.CommitteeIndex == *req.CommitteeIndex
slotMatch := req.Slot != nil && att.Data.Slot == *req.Slot

if bothDefined && committeeIndexMatch && slotMatch {
filteredAtts = append(filteredAtts, migration.V1Alpha1AttestationToV1(att))
} else if !bothDefined && (committeeIndexMatch || slotMatch) {
filteredAtts = append(filteredAtts, migration.V1Alpha1AttestationToV1(att))
}
}
return &ethpbv1.AttestationsPoolResponse{Data: filteredAtts}, nil
}

// SubmitAttestations submits Attestation object to node. If attestation passes all validation
// constraints, node MUST publish attestation on appropriate subnet.
func (bs *Server) SubmitAttestations(ctx context.Context, req *ethpbv1.SubmitAttestationsRequest) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitAttestation")
defer span.End()

var validAttestations []*ethpbalpha.Attestation
var attFailures []*helpers.SingleIndexedVerificationFailure
for i, sourceAtt := range req.Data {
att := migration.V1AttToV1Alpha1(sourceAtt)
if _, err := bls.SignatureFromBytes(att.Signature); err != nil {
attFailures = append(attFailures, &helpers.SingleIndexedVerificationFailure{
Index: i,
Message: "Incorrect attestation signature: " + err.Error(),
})
continue
}

// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
// of a received unaggregated attestation.
// Note we can't send for aggregated att because we don't have selection proof.
if !corehelpers.IsAggregated(att) {
bs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{
Attestation: att,
},
})
}

validAttestations = append(validAttestations, att)
}

broadcastFailed := false
for _, att := range validAttestations {
// Determine subnet to broadcast attestation to
wantedEpoch := slots.ToEpoch(att.Data.Slot)
vals, err := bs.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
if err != nil {
return nil, err
}
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.Data.CommitteeIndex, att.Data.Slot)

if err := bs.Broadcaster.BroadcastAttestation(ctx, subnet, att); err != nil {
broadcastFailed = true
}

if corehelpers.IsAggregated(att) {
if err := bs.AttestationsPool.SaveAggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save aggregated att")
}
} else {
if err := bs.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save unaggregated att")
}
}
}
if broadcastFailed {
return nil, status.Errorf(
codes.Internal,
"Could not publish one or more attestations. Some attestations could be published successfully.")
}

if len(attFailures) > 0 {
failuresContainer := &helpers.IndexedVerificationFailure{Failures: attFailures}
err := grpc.AppendCustomErrorHeader(ctx, failuresContainer)
if err != nil {
return nil, status.Errorf(
codes.InvalidArgument,
"One or more attestations failed validation. Could not prepare attestation failure information: %v",
err,
)
}
return nil, status.Errorf(codes.InvalidArgument, "One or more attestations failed validation")
}

return &emptypb.Empty{}, nil
}

// ListPoolAttesterSlashings retrieves attester slashings known by the node but
// not necessarily incorporated into any block.
func (bs *Server) ListPoolAttesterSlashings(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.AttesterSlashingsPoolResponse, error) {

@@ -2,14 +2,9 @@ package beacon

import (
"context"
"reflect"
"strings"
"testing"
"time"

"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/prysmaticlabs/go-bitfield"
grpcutil "github.com/prysmaticlabs/prysm/v4/api/grpc"
blockchainmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
prysmtime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
@@ -36,172 +31,9 @@ import (
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
)

func TestListPoolAttestations(t *testing.T) {
bs, err := util.NewBeaconState()
require.NoError(t, err)
att1 := &ethpbv1alpha1.Attestation{
AggregationBits: []byte{1, 10},
Data: &ethpbv1alpha1.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature1"), 96),
}
att2 := &ethpbv1alpha1.Attestation{
AggregationBits: []byte{4, 40},
Data: &ethpbv1alpha1.AttestationData{
Slot: 4,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot4"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 4,
Root: bytesutil.PadTo([]byte("sourceroot4"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 40,
Root: bytesutil.PadTo([]byte("targetroot4"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature4"), 96),
}
att3 := &ethpbv1alpha1.Attestation{
AggregationBits: []byte{2, 20},
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 2,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 2,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 20,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
}
att4 := &ethpbv1alpha1.Attestation{
AggregationBits: bitfield.NewBitlist(8),
Data: &ethpbv1alpha1.AttestationData{
Slot: 4,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 2,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 20,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
}
att5 := &ethpbv1alpha1.Attestation{
AggregationBits: bitfield.NewBitlist(8),
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 2,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 20,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature1"), 96),
}
att6 := &ethpbv1alpha1.Attestation{
AggregationBits: bitfield.NewBitlist(8),
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 2,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 20,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
AttestationsPool: attestations.NewPool(),
}
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]*ethpbv1alpha1.Attestation{att1, att2, att3}))
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]*ethpbv1alpha1.Attestation{att4, att5, att6}))

t.Run("empty request", func(t *testing.T) {
req := &ethpbv1.AttestationsPoolRequest{}
resp, err := s.ListPoolAttestations(context.Background(), req)
require.NoError(t, err)
require.Equal(t, 6, len(resp.Data))
})

t.Run("slot request", func(t *testing.T) {
slot := primitives.Slot(2)
req := &ethpbv1.AttestationsPoolRequest{
Slot: &slot,
}
resp, err := s.ListPoolAttestations(context.Background(), req)
require.NoError(t, err)
require.Equal(t, 3, len(resp.Data))
for _, datum := range resp.Data {
assert.DeepEqual(t, datum.Data.Slot, slot)
}
})

t.Run("index request", func(t *testing.T) {
index := primitives.CommitteeIndex(4)
req := &ethpbv1.AttestationsPoolRequest{
CommitteeIndex: &index,
}
resp, err := s.ListPoolAttestations(context.Background(), req)
require.NoError(t, err)
require.Equal(t, 4, len(resp.Data))
for _, datum := range resp.Data {
assert.DeepEqual(t, datum.Data.Index, index)
}
})

t.Run("both slot + index request", func(t *testing.T) {
slot := primitives.Slot(2)
index := primitives.CommitteeIndex(4)
req := &ethpbv1.AttestationsPoolRequest{
Slot: &slot,
CommitteeIndex: &index,
}
resp, err := s.ListPoolAttestations(context.Background(), req)
require.NoError(t, err)
require.Equal(t, 2, len(resp.Data))
for _, datum := range resp.Data {
assert.DeepEqual(t, datum.Data.Index, index)
assert.DeepEqual(t, datum.Data.Slot, slot)
}
})
}

func TestListPoolAttesterSlashings(t *testing.T) {
bs, err := util.NewBeaconState()
require.NoError(t, err)
@@ -936,300 +768,6 @@ func TestSubmitVoluntaryExit_InvalidExit(t *testing.T) {
assert.Equal(t, false, broadcaster.BroadcastCalled)
}

func TestServer_SubmitAttestations_Ok(t *testing.T) {
ctx := context.Background()

transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()

params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
c.SlotsPerEpoch = 1
params.OverrideBeaconConfig(c)

_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validators := []*ethpbv1alpha1.Validator{
{
PublicKey: keys[0].PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = validators
state.Slot = 1
state.PreviousJustifiedCheckpoint = &ethpbv1alpha1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
}
return nil
})
require.NoError(t, err)
b := bitfield.NewBitlist(1)
b.SetBitAt(0, true)

sourceCheckpoint := &ethpbv1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
}
att1 := &ethpbv1.Attestation{
AggregationBits: b,
Data: &ethpbv1.AttestationData{
Slot: 0,
Index: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("beaconblockroot1"), 32),
Source: sourceCheckpoint,
Target: &ethpbv1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: make([]byte, 96),
}
att2 := &ethpbv1.Attestation{
AggregationBits: b,
Data: &ethpbv1.AttestationData{
Slot: 0,
Index: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("beaconblockroot2"), 32),
Source: sourceCheckpoint,
Target: &ethpbv1.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: make([]byte, 96),
}

for _, att := range []*ethpbv1.Attestation{att1, att2} {
sb, err := signing.ComputeDomainAndSign(
bs,
slots.ToEpoch(att.Data.Slot),
att.Data,
params.BeaconConfig().DomainBeaconAttester,
keys[0],
)
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
att.Signature = sig.Marshal()
}

broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{State: bs}
s := &Server{
HeadFetcher: chainService,
ChainInfoFetcher: chainService,
AttestationsPool: attestations.NewPool(),
Broadcaster: broadcaster,
OperationNotifier: &blockchainmock.MockOperationNotifier{},
}

_, err = s.SubmitAttestations(ctx, &ethpbv1.SubmitAttestationsRequest{
Data: []*ethpbv1.Attestation{att1, att2},
})
require.NoError(t, err)
assert.Equal(t, true, broadcaster.BroadcastCalled)
assert.Equal(t, 2, len(broadcaster.BroadcastAttestations))
expectedAtt1, err := att1.HashTreeRoot()
require.NoError(t, err)
expectedAtt2, err := att2.HashTreeRoot()
require.NoError(t, err)
actualAtt1, err := broadcaster.BroadcastAttestations[0].HashTreeRoot()
require.NoError(t, err)
actualAtt2, err := broadcaster.BroadcastAttestations[1].HashTreeRoot()
require.NoError(t, err)
for _, r := range [][32]byte{actualAtt1, actualAtt2} {
assert.Equal(t, true, reflect.DeepEqual(expectedAtt1, r) || reflect.DeepEqual(expectedAtt2, r))
}

require.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
}
|
||||
|
||||
func TestServer_SubmitAttestations_ValidAttestationSubmitted(t *testing.T) {
|
||||
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
|
||||
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
c := params.BeaconConfig().Copy()
|
||||
// Required for correct committee size calculation.
|
||||
c.SlotsPerEpoch = 1
|
||||
params.OverrideBeaconConfig(c)
|
||||
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(1)
|
||||
require.NoError(t, err)
|
||||
validators := []*ethpbv1alpha1.Validator{
|
||||
{
|
||||
PublicKey: keys[0].PublicKey().Marshal(),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
},
|
||||
}
|
||||
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
|
||||
state.Validators = validators
|
||||
state.Slot = 1
|
||||
state.PreviousJustifiedCheckpoint = ðpbv1alpha1.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
sourceCheckpoint := ðpbv1.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
|
||||
}
|
||||
b := bitfield.NewBitlist(1)
|
||||
b.SetBitAt(0, true)
|
||||
attValid := ðpbv1.Attestation{
|
||||
AggregationBits: b,
|
||||
Data: ðpbv1.AttestationData{
|
||||
Slot: 0,
|
||||
Index: 0,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("beaconblockroot1"), 32),
|
||||
Source: sourceCheckpoint,
|
||||
Target: ðpbv1.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
attInvalidSignature := ðpbv1.Attestation{
|
||||
AggregationBits: b,
|
||||
Data: ðpbv1.AttestationData{
|
||||
Slot: 0,
|
||||
Index: 0,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("beaconblockroot2"), 32),
|
||||
Source: sourceCheckpoint,
|
||||
Target: ðpbv1.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
|
||||
// Don't sign attInvalidSignature.
|
||||
sb, err := signing.ComputeDomainAndSign(
|
||||
bs,
|
||||
slots.ToEpoch(attValid.Data.Slot),
|
||||
attValid.Data,
|
||||
params.BeaconConfig().DomainBeaconAttester,
|
||||
keys[0],
|
||||
)
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
attValid.Signature = sig.Marshal()
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
ChainInfoFetcher: chainService,
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
Broadcaster: broadcaster,
|
||||
OperationNotifier: &blockchainmock.MockOperationNotifier{},
|
||||
}
|
||||
|
||||
_, err = s.SubmitAttestations(ctx, ðpbv1.SubmitAttestationsRequest{
|
||||
Data: []*ethpbv1.Attestation{attValid, attInvalidSignature},
|
||||
})
|
||||
require.ErrorContains(t, "One or more attestations failed validation", err)
|
||||
expectedAtt, err := attValid.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled)
|
||||
require.Equal(t, 1, len(broadcaster.BroadcastAttestations))
|
||||
broadcastRoot, err := broadcaster.BroadcastAttestations[0].HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expectedAtt, broadcastRoot)
|
||||
|
||||
require.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
}
|
||||
|
||||
func TestServer_SubmitAttestations_InvalidAttestationGRPCHeader(t *testing.T) {
|
||||
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
|
||||
|
||||
transition.SkipSlotCache.Disable()
|
||||
defer transition.SkipSlotCache.Enable()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
c := params.BeaconConfig().Copy()
|
||||
// Required for correct committee size calculation.
|
||||
c.SlotsPerEpoch = 1
|
||||
params.OverrideBeaconConfig(c)
|
||||
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(1)
|
||||
require.NoError(t, err)
|
||||
validators := []*ethpbv1alpha1.Validator{
|
||||
{
|
||||
PublicKey: keys[0].PublicKey().Marshal(),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
},
|
||||
}
|
||||
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
|
||||
state.Validators = validators
|
||||
state.Slot = 1
|
||||
state.PreviousJustifiedCheckpoint = &ethpbv1alpha1.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
b := bitfield.NewBitlist(1)
|
||||
b.SetBitAt(0, true)
|
||||
att := &ethpbv1.Attestation{
|
||||
AggregationBits: b,
|
||||
Data: &ethpbv1.AttestationData{
|
||||
Slot: 0,
|
||||
Index: 0,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("beaconblockroot2"), 32),
|
||||
Source: &ethpbv1.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
|
||||
},
|
||||
Target: &ethpbv1.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
|
||||
},
|
||||
},
|
||||
Signature: nil,
|
||||
}
|
||||
|
||||
chain := &blockchainmock.ChainService{State: bs}
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: chain,
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
Broadcaster: broadcaster,
|
||||
OperationNotifier: &blockchainmock.MockOperationNotifier{},
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
_, err = s.SubmitAttestations(ctx, &ethpbv1.SubmitAttestationsRequest{
|
||||
Data: []*ethpbv1.Attestation{att},
|
||||
})
|
||||
require.ErrorContains(t, "One or more attestations failed validation", err)
|
||||
sts, ok := grpc.ServerTransportStreamFromContext(ctx).(*runtime.ServerTransportStream)
|
||||
require.Equal(t, true, ok, "type assertion failed")
|
||||
md := sts.Header()
|
||||
v, ok := md[strings.ToLower(grpcutil.CustomErrorMetadataKey)]
|
||||
require.Equal(t, true, ok, "could not retrieve custom error metadata value")
|
||||
assert.DeepEqual(
|
||||
t,
|
||||
[]string{"{\"failures\":[{\"index\":0,\"message\":\"Incorrect attestation signature: could not create signature from byte slice: signature must be 96 bytes\"}]}"},
|
||||
v,
|
||||
)
|
||||
}
|
||||
|
||||
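
For reference, the custom error metadata asserted above is plain JSON and can be decoded client-side with nothing but the standard library. A minimal sketch, using local struct mirrors of the failure shape shown in the assertion (the prysm types are added later in this change set):

package main

import (
	"encoding/json"
	"fmt"
)

// failure mirrors one entry of the custom error metadata asserted above,
// i.e. {"failures":[{"index":0,"message":"..."}]}.
type failure struct {
	Index   int    `json:"index"`
	Message string `json:"message"`
}

type failurePayload struct {
	Failures []failure `json:"failures"`
}

func main() {
	// Value read from the gRPC header keyed by CustomErrorMetadataKey.
	raw := `{"failures":[{"index":0,"message":"Incorrect attestation signature"}]}`

	var p failurePayload
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		panic(err)
	}
	for _, f := range p.Failures {
		// Report which attestation in the submitted batch failed and why.
		fmt.Printf("attestation %d rejected: %s\n", f.Index, f.Message)
	}
}
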
func TestListBLSToExecutionChanges(t *testing.T) {
|
||||
change1 := &ethpbv1alpha1.SignedBLSToExecutionChange{
|
||||
Message: &ethpbv1alpha1.BLSToExecutionChange{
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
bytesutil2 "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
@@ -13,6 +14,14 @@ import (
|
||||
"github.com/wealdtech/go-bytesutil"
|
||||
)
|
||||
|
||||
type ListAttestationsResponse struct {
|
||||
Data []*shared.Attestation `json:"data"`
|
||||
}
|
||||
|
||||
type SubmitAttestationsRequest struct {
|
||||
Data []*shared.Attestation `json:"data" validate:"required,dive"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlock struct {
|
||||
Message BeaconBlock `json:"message" validate:"required"`
|
||||
Signature string `json:"signature" validate:"required"`
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
statenative "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
@@ -102,13 +103,13 @@ func (bs *Server) ListValidators(ctx context.Context, req *ethpb.StateValidators
|
||||
return &ethpb.StateValidatorsResponse{Data: valContainers, ExecutionOptimistic: isOptimistic, Finalized: isFinalized}, nil
|
||||
}
|
||||
|
||||
filterStatus := make(map[ethpb.ValidatorStatus]bool, len(req.Status))
|
||||
const lastValidStatusValue = ethpb.ValidatorStatus(12)
|
||||
filterStatus := make(map[validator.ValidatorStatus]bool, len(req.Status))
|
||||
const lastValidStatusValue = 12
|
||||
for _, ss := range req.Status {
|
||||
if ss > lastValidStatusValue {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "Invalid status "+ss.String())
|
||||
}
|
||||
filterStatus[ss] = true
|
||||
filterStatus[validator.ValidatorStatus(ss)] = true
|
||||
}
|
||||
epoch := slots.ToEpoch(st.Slot())
|
||||
filteredVals := make([]*ethpb.ValidatorContainer, 0, len(valContainers))
|
||||
@@ -243,8 +244,8 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
|
||||
if len(validatorIds) == 0 {
|
||||
allValidators := state.Validators()
|
||||
valContainers = make([]*ethpb.ValidatorContainer, len(allValidators))
|
||||
for i, validator := range allValidators {
|
||||
readOnlyVal, err := statenative.NewValidator(validator)
|
||||
for i, val := range allValidators {
|
||||
readOnlyVal, err := statenative.NewValidator(val)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not convert validator: %v", err)
|
||||
}
|
||||
@@ -255,8 +256,8 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
|
||||
valContainers[i] = &ethpb.ValidatorContainer{
|
||||
Index: primitives.ValidatorIndex(i),
|
||||
Balance: allBalances[i],
|
||||
Status: subStatus,
|
||||
Validator: migration.V1Alpha1ValidatorToV1(validator),
|
||||
Status: ethpb.ValidatorStatus(subStatus),
|
||||
Validator: migration.V1Alpha1ValidatorToV1(val),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -278,7 +279,7 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
|
||||
}
|
||||
valIndex = primitives.ValidatorIndex(index)
|
||||
}
|
||||
validator, err := state.ValidatorAtIndex(valIndex)
|
||||
val, err := state.ValidatorAtIndex(valIndex)
|
||||
if _, ok := err.(*statenative.ValidatorIndexOutOfRangeError); ok {
|
||||
// Ignore well-formed yet unknown indexes.
|
||||
continue
|
||||
@@ -286,8 +287,8 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get validator")
|
||||
}
|
||||
v1Validator := migration.V1Alpha1ValidatorToV1(validator)
|
||||
readOnlyVal, err := statenative.NewValidator(validator)
|
||||
v1Validator := migration.V1Alpha1ValidatorToV1(val)
|
||||
readOnlyVal, err := statenative.NewValidator(val)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not convert validator: %v", err)
|
||||
}
|
||||
@@ -298,7 +299,7 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
|
||||
valContainers = append(valContainers, &ethpb.ValidatorContainer{
|
||||
Index: valIndex,
|
||||
Balance: allBalances[valIndex],
|
||||
Status: subStatus,
|
||||
Status: ethpb.ValidatorStatus(subStatus),
|
||||
Validator: v1Validator,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
@@ -463,7 +464,7 @@ func TestListValidators_Status(t *testing.T) {
|
||||
require.Equal(
|
||||
t,
|
||||
true,
|
||||
status == ethpb.ValidatorStatus_ACTIVE,
|
||||
status == validator.Active,
|
||||
)
|
||||
require.Equal(
|
||||
t,
|
||||
@@ -501,7 +502,7 @@ func TestListValidators_Status(t *testing.T) {
|
||||
require.Equal(
|
||||
t,
|
||||
true,
|
||||
status == ethpb.ValidatorStatus_ACTIVE_ONGOING,
|
||||
status == validator.ActiveOngoing,
|
||||
)
|
||||
require.Equal(
|
||||
t,
|
||||
@@ -538,7 +539,7 @@ func TestListValidators_Status(t *testing.T) {
|
||||
require.Equal(
|
||||
t,
|
||||
true,
|
||||
status == ethpb.ValidatorStatus_EXITED,
|
||||
status == validator.Exited,
|
||||
)
|
||||
require.Equal(
|
||||
t,
|
||||
@@ -574,7 +575,7 @@ func TestListValidators_Status(t *testing.T) {
|
||||
require.Equal(
|
||||
t,
|
||||
true,
|
||||
status == ethpb.ValidatorStatus_PENDING_INITIALIZED || status == ethpb.ValidatorStatus_EXITED_UNSLASHED,
|
||||
status == validator.PendingInitialized || status == validator.ExitedUnslashed,
|
||||
)
|
||||
require.Equal(
|
||||
t,
|
||||
@@ -612,7 +613,7 @@ func TestListValidators_Status(t *testing.T) {
|
||||
require.Equal(
|
||||
t,
|
||||
true,
|
||||
status == ethpb.ValidatorStatus_PENDING || subStatus == ethpb.ValidatorStatus_EXITED_SLASHED,
|
||||
status == validator.Pending || subStatus == validator.ExitedSlashed,
|
||||
)
|
||||
require.Equal(
|
||||
t,
|
||||
|
||||
@@ -13,14 +13,15 @@ go_library(
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
@@ -48,6 +49,7 @@ go_test(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/api/grpc"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
@@ -38,8 +39,8 @@ func ValidateSyncGRPC(
|
||||
return status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
|
||||
}
|
||||
|
||||
syncDetailsContainer := &SyncDetailsContainer{
|
||||
Data: &SyncDetailsJson{
|
||||
syncDetailsContainer := &shared.SyncDetailsContainer{
|
||||
Data: &shared.SyncDetails{
|
||||
HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
|
||||
SyncDistance: strconv.FormatUint(uint64(timeFetcher.CurrentSlot()-headSlot), 10),
|
||||
IsSyncing: true,
|
||||
@@ -58,35 +59,6 @@ func ValidateSyncGRPC(
|
||||
return status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
|
||||
// ValidateSyncHTTP checks whether the node is currently syncing and returns sync information.
|
||||
// It returns information whether the node is currently syncing along with sync details.
|
||||
func ValidateSyncHTTP(
|
||||
ctx context.Context,
|
||||
syncChecker sync.Checker,
|
||||
headFetcher blockchain.HeadFetcher,
|
||||
timeFetcher blockchain.TimeFetcher,
|
||||
optimisticModeFetcher blockchain.OptimisticModeFetcher,
|
||||
) (bool, *SyncDetailsContainer, error) {
|
||||
if !syncChecker.Syncing() {
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
headSlot := headFetcher.HeadSlot()
|
||||
isOptimistic, err := optimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return true, nil, errors.Wrap(err, "could not check optimistic status")
|
||||
}
|
||||
syncDetails := &SyncDetailsContainer{
|
||||
Data: &SyncDetailsJson{
|
||||
HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
|
||||
SyncDistance: strconv.FormatUint(uint64(timeFetcher.CurrentSlot()-headSlot), 10),
|
||||
IsSyncing: true,
|
||||
IsOptimistic: isOptimistic,
|
||||
},
|
||||
}
|
||||
return true, syncDetails, nil
|
||||
}
|
||||
|
||||
// IsOptimistic checks whether the beacon state's block is optimistic.
|
||||
func IsOptimistic(
|
||||
ctx context.Context,
|
||||
@@ -216,17 +188,3 @@ func isStateRootOptimistic(
|
||||
// No block matching requested state root, return true.
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SyncDetailsJson contains information about node sync status.
|
||||
type SyncDetailsJson struct {
|
||||
HeadSlot string `json:"head_slot"`
|
||||
SyncDistance string `json:"sync_distance"`
|
||||
IsSyncing bool `json:"is_syncing"`
|
||||
IsOptimistic bool `json:"is_optimistic"`
|
||||
ElOffline bool `json:"el_offline"`
|
||||
}
|
||||
|
||||
// SyncDetailsContainer is a wrapper for Data.
|
||||
type SyncDetailsContainer struct {
|
||||
Data *SyncDetailsJson `json:"data"`
|
||||
}
|
||||
|
||||
@@ -5,66 +5,66 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
|
||||
)
|
||||
|
||||
// ValidatorStatus returns a validator's status at the given epoch.
|
||||
func ValidatorStatus(validator state.ReadOnlyValidator, epoch primitives.Epoch) (ethpb.ValidatorStatus, error) {
|
||||
valStatus, err := ValidatorSubStatus(validator, epoch)
|
||||
func ValidatorStatus(val state.ReadOnlyValidator, epoch primitives.Epoch) (validator.ValidatorStatus, error) {
|
||||
valStatus, err := ValidatorSubStatus(val, epoch)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get sub status")
|
||||
return 0, errors.Wrap(err, "could not get validator sub status")
|
||||
}
|
||||
switch valStatus {
|
||||
case ethpb.ValidatorStatus_PENDING_INITIALIZED, ethpb.ValidatorStatus_PENDING_QUEUED:
|
||||
return ethpb.ValidatorStatus_PENDING, nil
|
||||
case ethpb.ValidatorStatus_ACTIVE_ONGOING, ethpb.ValidatorStatus_ACTIVE_SLASHED, ethpb.ValidatorStatus_ACTIVE_EXITING:
|
||||
return ethpb.ValidatorStatus_ACTIVE, nil
|
||||
case ethpb.ValidatorStatus_EXITED_UNSLASHED, ethpb.ValidatorStatus_EXITED_SLASHED:
|
||||
return ethpb.ValidatorStatus_EXITED, nil
|
||||
case ethpb.ValidatorStatus_WITHDRAWAL_POSSIBLE, ethpb.ValidatorStatus_WITHDRAWAL_DONE:
|
||||
return ethpb.ValidatorStatus_WITHDRAWAL, nil
|
||||
case validator.PendingInitialized, validator.PendingQueued:
|
||||
return validator.Pending, nil
|
||||
case validator.ActiveOngoing, validator.ActiveSlashed, validator.ActiveExiting:
|
||||
return validator.Active, nil
|
||||
case validator.ExitedUnslashed, validator.ExitedSlashed:
|
||||
return validator.Exited, nil
|
||||
case validator.WithdrawalPossible, validator.WithdrawalDone:
|
||||
return validator.Withdrawal, nil
|
||||
}
|
||||
return 0, errors.New("invalid validator state")
|
||||
}
|
||||
|
||||
// ValidatorSubStatus returns a validator's sub-status at the given epoch.
|
||||
func ValidatorSubStatus(validator state.ReadOnlyValidator, epoch primitives.Epoch) (ethpb.ValidatorStatus, error) {
|
||||
func ValidatorSubStatus(val state.ReadOnlyValidator, epoch primitives.Epoch) (validator.ValidatorStatus, error) {
|
||||
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
|
||||
|
||||
// Pending.
|
||||
if validator.ActivationEpoch() > epoch {
|
||||
if validator.ActivationEligibilityEpoch() == farFutureEpoch {
|
||||
return ethpb.ValidatorStatus_PENDING_INITIALIZED, nil
|
||||
} else if validator.ActivationEligibilityEpoch() < farFutureEpoch {
|
||||
return ethpb.ValidatorStatus_PENDING_QUEUED, nil
|
||||
if val.ActivationEpoch() > epoch {
|
||||
if val.ActivationEligibilityEpoch() == farFutureEpoch {
|
||||
return validator.PendingInitialized, nil
|
||||
} else if val.ActivationEligibilityEpoch() < farFutureEpoch {
|
||||
return validator.PendingQueued, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Active.
|
||||
if validator.ActivationEpoch() <= epoch && epoch < validator.ExitEpoch() {
|
||||
if validator.ExitEpoch() == farFutureEpoch {
|
||||
return ethpb.ValidatorStatus_ACTIVE_ONGOING, nil
|
||||
} else if validator.ExitEpoch() < farFutureEpoch {
|
||||
if validator.Slashed() {
|
||||
return ethpb.ValidatorStatus_ACTIVE_SLASHED, nil
|
||||
if val.ActivationEpoch() <= epoch && epoch < val.ExitEpoch() {
|
||||
if val.ExitEpoch() == farFutureEpoch {
|
||||
return validator.ActiveOngoing, nil
|
||||
} else if val.ExitEpoch() < farFutureEpoch {
|
||||
if val.Slashed() {
|
||||
return validator.ActiveSlashed, nil
|
||||
}
|
||||
return ethpb.ValidatorStatus_ACTIVE_EXITING, nil
|
||||
return validator.ActiveExiting, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Exited.
|
||||
if validator.ExitEpoch() <= epoch && epoch < validator.WithdrawableEpoch() {
|
||||
if validator.Slashed() {
|
||||
return ethpb.ValidatorStatus_EXITED_SLASHED, nil
|
||||
if val.ExitEpoch() <= epoch && epoch < val.WithdrawableEpoch() {
|
||||
if val.Slashed() {
|
||||
return validator.ExitedSlashed, nil
|
||||
}
|
||||
return ethpb.ValidatorStatus_EXITED_UNSLASHED, nil
|
||||
return validator.ExitedUnslashed, nil
|
||||
}
|
||||
|
||||
if validator.WithdrawableEpoch() <= epoch {
|
||||
if validator.EffectiveBalance() != 0 {
|
||||
return ethpb.ValidatorStatus_WITHDRAWAL_POSSIBLE, nil
|
||||
if val.WithdrawableEpoch() <= epoch {
|
||||
if val.EffectiveBalance() != 0 {
|
||||
return validator.WithdrawalPossible, nil
|
||||
} else {
|
||||
return ethpb.ValidatorStatus_WITHDRAWAL_DONE, nil
|
||||
return validator.WithdrawalDone, nil
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
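
The switch above is the whole mapping from sub-status to broad status. A standalone sketch of that collapse, using local stand-ins for the validator.* constants (the constant names here are illustrative assumptions; the grouping follows the switch above):

package main

import "fmt"

// Stand-ins for the validator.ValidatorStatus constants referenced in the diff above.
type status int

const (
	pendingInitialized status = iota
	pendingQueued
	activeOngoing
	activeExiting
	activeSlashed
	exitedUnslashed
	exitedSlashed
	withdrawalPossible
	withdrawalDone

	pending
	active
	exited
	withdrawal
)

// broadStatus collapses a sub-status into its broad status,
// mirroring the switch in ValidatorStatus above.
func broadStatus(sub status) (status, error) {
	switch sub {
	case pendingInitialized, pendingQueued:
		return pending, nil
	case activeOngoing, activeSlashed, activeExiting:
		return active, nil
	case exitedUnslashed, exitedSlashed:
		return exited, nil
	case withdrawalPossible, withdrawalDone:
		return withdrawal, nil
	}
	return 0, fmt.Errorf("invalid validator sub-status %d", sub)
}

func main() {
	b, err := broadStatus(activeSlashed)
	fmt.Println(b == active, err) // true <nil>
}
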
@@ -7,6 +7,7 @@ import (
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
@@ -23,7 +24,7 @@ func Test_ValidatorStatus(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want ethpb.ValidatorStatus
|
||||
want validator.ValidatorStatus
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
@@ -35,7 +36,7 @@ func Test_ValidatorStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(5),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_PENDING,
|
||||
want: validator.Pending,
|
||||
},
|
||||
{
|
||||
name: "pending queued",
|
||||
@@ -46,7 +47,7 @@ func Test_ValidatorStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(5),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_PENDING,
|
||||
want: validator.Pending,
|
||||
},
|
||||
{
|
||||
name: "active ongoing",
|
||||
@@ -57,7 +58,7 @@ func Test_ValidatorStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(5),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_ACTIVE,
|
||||
want: validator.Active,
|
||||
},
|
||||
{
|
||||
name: "active slashed",
|
||||
@@ -69,7 +70,7 @@ func Test_ValidatorStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(5),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_ACTIVE,
|
||||
want: validator.Active,
|
||||
},
|
||||
{
|
||||
name: "active exiting",
|
||||
@@ -81,7 +82,7 @@ func Test_ValidatorStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(5),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_ACTIVE,
|
||||
want: validator.Active,
|
||||
},
|
||||
{
|
||||
name: "exited slashed",
|
||||
@@ -94,7 +95,7 @@ func Test_ValidatorStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(35),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_EXITED,
|
||||
want: validator.Exited,
|
||||
},
|
||||
{
|
||||
name: "exited unslashed",
|
||||
@@ -107,7 +108,7 @@ func Test_ValidatorStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(35),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_EXITED,
|
||||
want: validator.Exited,
|
||||
},
|
||||
{
|
||||
name: "withdrawal possible",
|
||||
@@ -121,7 +122,7 @@ func Test_ValidatorStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(45),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_WITHDRAWAL,
|
||||
want: validator.Withdrawal,
|
||||
},
|
||||
{
|
||||
name: "withdrawal done",
|
||||
@@ -135,7 +136,7 @@ func Test_ValidatorStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(45),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_WITHDRAWAL,
|
||||
want: validator.Withdrawal,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
@@ -161,7 +162,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want ethpb.ValidatorStatus
|
||||
want validator.ValidatorStatus
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
@@ -173,7 +174,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(5),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_PENDING_INITIALIZED,
|
||||
want: validator.PendingInitialized,
|
||||
},
|
||||
{
|
||||
name: "pending queued",
|
||||
@@ -184,7 +185,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(5),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_PENDING_QUEUED,
|
||||
want: validator.PendingQueued,
|
||||
},
|
||||
{
|
||||
name: "active ongoing",
|
||||
@@ -195,7 +196,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(5),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_ACTIVE_ONGOING,
|
||||
want: validator.ActiveOngoing,
|
||||
},
|
||||
{
|
||||
name: "active slashed",
|
||||
@@ -207,7 +208,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(5),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_ACTIVE_SLASHED,
|
||||
want: validator.ActiveSlashed,
|
||||
},
|
||||
{
|
||||
name: "active exiting",
|
||||
@@ -219,7 +220,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(5),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_ACTIVE_EXITING,
|
||||
want: validator.ActiveExiting,
|
||||
},
|
||||
{
|
||||
name: "exited slashed",
|
||||
@@ -232,7 +233,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(35),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_EXITED_SLASHED,
|
||||
want: validator.ExitedSlashed,
|
||||
},
|
||||
{
|
||||
name: "exited unslashed",
|
||||
@@ -245,7 +246,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(35),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_EXITED_UNSLASHED,
|
||||
want: validator.ExitedUnslashed,
|
||||
},
|
||||
{
|
||||
name: "withdrawal possible",
|
||||
@@ -259,7 +260,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(45),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_WITHDRAWAL_POSSIBLE,
|
||||
want: validator.WithdrawalPossible,
|
||||
},
|
||||
{
|
||||
name: "withdrawal done",
|
||||
@@ -273,7 +274,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
|
||||
},
|
||||
epoch: primitives.Epoch(45),
|
||||
},
|
||||
want: ethpb.ValidatorStatus_WITHDRAWAL_DONE,
|
||||
want: validator.WithdrawalDone,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -3,8 +3,10 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"handlers.go",
|
||||
"node.go",
|
||||
"server.go",
|
||||
"structs.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/node",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
@@ -17,6 +19,7 @@ go_library(
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
@@ -35,6 +38,7 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"handlers_test.go",
|
||||
"node_test.go",
|
||||
"server_test.go",
|
||||
],
|
||||
|
||||
34
beacon-chain/rpc/eth/node/handlers.go
Normal file
@@ -0,0 +1,34 @@
package node

import (
	"net/http"
	"strconv"

	http2 "github.com/prysmaticlabs/prysm/v4/network/http"
	"go.opencensus.io/trace"
)

// GetSyncStatus requests the beacon node to describe if it's currently syncing or not, and
// if it is, what block it is up to.
func (s *Server) GetSyncStatus(w http.ResponseWriter, r *http.Request) {
	ctx, span := trace.StartSpan(r.Context(), "node.GetSyncStatus")
	defer span.End()

	isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
	if err != nil {
		http2.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		return
	}

	headSlot := s.HeadFetcher.HeadSlot()
	response := &SyncStatusResponse{
		Data: &SyncStatusResponseData{
			HeadSlot:     strconv.FormatUint(uint64(headSlot), 10),
			SyncDistance: strconv.FormatUint(uint64(s.GenesisTimeFetcher.CurrentSlot()-headSlot), 10),
			IsSyncing:    s.SyncChecker.Syncing(),
			IsOptimistic: isOptimistic,
			ElOffline:    !s.ExecutionChainInfoFetcher.ExecutionClientConnected(),
		},
	}
	http2.WriteJson(w, response)
}
52
beacon-chain/rpc/eth/node/handlers_test.go
Normal file
@@ -0,0 +1,52 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
|
||||
syncmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
)
|
||||
|
||||
func TestSyncStatus(t *testing.T) {
|
||||
currentSlot := new(primitives.Slot)
|
||||
*currentSlot = 110
|
||||
state, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
err = state.SetSlot(100)
|
||||
require.NoError(t, err)
|
||||
chainService := &mock.ChainService{Slot: currentSlot, State: state, Optimistic: true}
|
||||
syncChecker := &syncmock.Sync{}
|
||||
syncChecker.IsSyncing = true
|
||||
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: syncChecker,
|
||||
ExecutionChainInfoFetcher: &testutil.MockExecutionChainInfoFetcher{},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetSyncStatus(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &SyncStatusResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.NotNil(t, resp)
|
||||
assert.Equal(t, "100", resp.Data.HeadSlot)
|
||||
assert.Equal(t, "10", resp.Data.SyncDistance)
|
||||
assert.Equal(t, true, resp.Data.IsSyncing)
|
||||
assert.Equal(t, true, resp.Data.IsOptimistic)
|
||||
assert.Equal(t, false, resp.Data.ElOffline)
|
||||
}
|
||||
@@ -259,29 +259,6 @@ func (_ *Server) GetVersion(ctx context.Context, _ *emptypb.Empty) (*ethpb.Versi
	}, nil
}

// GetSyncStatus requests the beacon node to describe if it's currently syncing or not, and
// if it is, what block it is up to.
func (ns *Server) GetSyncStatus(ctx context.Context, _ *emptypb.Empty) (*ethpb.SyncingResponse, error) {
	ctx, span := trace.StartSpan(ctx, "node.GetSyncStatus")
	defer span.End()

	isOptimistic, err := ns.OptimisticModeFetcher.IsOptimistic(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
	}

	headSlot := ns.HeadFetcher.HeadSlot()
	return &ethpb.SyncingResponse{
		Data: &ethpb.SyncInfo{
			HeadSlot:     headSlot,
			SyncDistance: ns.GenesisTimeFetcher.CurrentSlot() - headSlot,
			IsSyncing:    ns.SyncChecker.Syncing(),
			IsOptimistic: isOptimistic,
			ElOffline:    !ns.ExecutionChainInfoFetcher.ExecutionClientConnected(),
		},
	}, nil
}

// GetHealth returns node health status in http status codes. Useful for load balancers.
// Response Usage:
//

@@ -18,20 +18,16 @@ import (
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
grpcutil "github.com/prysmaticlabs/prysm/v4/api/grpc"
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
|
||||
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
|
||||
syncmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
pb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
@@ -161,33 +157,6 @@ func TestGetIdentity(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestSyncStatus(t *testing.T) {
|
||||
currentSlot := new(primitives.Slot)
|
||||
*currentSlot = 110
|
||||
state, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
err = state.SetSlot(100)
|
||||
require.NoError(t, err)
|
||||
chainService := &mock.ChainService{Slot: currentSlot, State: state, Optimistic: true}
|
||||
syncChecker := &syncmock.Sync{}
|
||||
syncChecker.IsSyncing = true
|
||||
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: syncChecker,
|
||||
ExecutionChainInfoFetcher: &testutil.MockExecutionChainInfoFetcher{},
|
||||
}
|
||||
resp, err := s.GetSyncStatus(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(100), resp.Data.HeadSlot)
|
||||
assert.Equal(t, primitives.Slot(10), resp.Data.SyncDistance)
|
||||
assert.Equal(t, true, resp.Data.IsSyncing)
|
||||
assert.Equal(t, true, resp.Data.IsOptimistic)
|
||||
assert.Equal(t, false, resp.Data.ElOffline)
|
||||
}
|
||||
|
||||
func TestGetPeer(t *testing.T) {
|
||||
const rawId = "16Uiu2HAkvyYtoQXZNTsthjgLHjEnv7kvwzEmjvsJjWXpbhtqpSUN"
|
||||
ctx := context.Background()
|
||||
|
||||
13
beacon-chain/rpc/eth/node/structs.go
Normal file
@@ -0,0 +1,13 @@
package node

type SyncStatusResponse struct {
	Data *SyncStatusResponseData `json:"data"`
}

type SyncStatusResponseData struct {
	HeadSlot     string `json:"head_slot"`
	SyncDistance string `json:"sync_distance"`
	IsSyncing    bool   `json:"is_syncing"`
	IsOptimistic bool   `json:"is_optimistic"`
	ElOffline    bool   `json:"el_offline"`
}
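
For clarity, the wire format produced by the new handler for these structs looks as follows. A minimal sketch using local mirrors of the structs above, with the values exercised in TestSyncStatus:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of SyncStatusResponse / SyncStatusResponseData defined above.
type syncStatusData struct {
	HeadSlot     string `json:"head_slot"`
	SyncDistance string `json:"sync_distance"`
	IsSyncing    bool   `json:"is_syncing"`
	IsOptimistic bool   `json:"is_optimistic"`
	ElOffline    bool   `json:"el_offline"`
}

type syncStatusResponse struct {
	Data *syncStatusData `json:"data"`
}

func main() {
	// Values match the ones asserted in TestSyncStatus above.
	resp := syncStatusResponse{Data: &syncStatusData{
		HeadSlot:     "100",
		SyncDistance: "10",
		IsSyncing:    true,
		IsOptimistic: true,
		ElOffline:    false,
	}}
	out, _ := json.MarshalIndent(resp, "", "  ")
	// Slots travel as decimal strings, unlike the slot-typed gRPC response removed above.
	fmt.Println(string(out))
}
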
@@ -10,7 +10,11 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -26,3 +26,20 @@ func NewDecodeError(err error, field string) *DecodeError {
func (e *DecodeError) Error() string {
	return fmt.Sprintf("could not decode %s: %s", strings.Join(e.path, "."), e.err.Error())
}

// IndexedVerificationFailureError wraps a collection of verification failures.
type IndexedVerificationFailureError struct {
	Message  string                        `json:"message"`
	Code     int                           `json:"code"`
	Failures []*IndexedVerificationFailure `json:"failures"`
}

func (e *IndexedVerificationFailureError) StatusCode() int {
	return e.Code
}

// IndexedVerificationFailure represents an issue when verifying a single indexed object e.g. an item in an array.
type IndexedVerificationFailure struct {
	Index   int    `json:"index"`
	Message string `json:"message"`
}

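
A server-side sketch of how a batch endpoint could report these failures, assuming only the JSON shape defined above (the rejectBatch helper and route are hypothetical; prysm's handlers use http2.WriteError instead):

package main

import (
	"encoding/json"
	"net/http"
)

// Local mirrors of the failure types defined above, kept standalone so the
// sketch compiles without the prysm packages.
type indexedFailure struct {
	Index   int    `json:"index"`
	Message string `json:"message"`
}

type indexedFailureError struct {
	Message  string            `json:"message"`
	Code     int               `json:"code"`
	Failures []*indexedFailure `json:"failures"`
}

// rejectBatch writes a 400 whose body lists which items of a submitted batch
// failed validation, mirroring the shape asserted in the attestation tests above.
func rejectBatch(w http.ResponseWriter, failures []*indexedFailure) {
	body := &indexedFailureError{
		Message:  "One or more attestations failed validation",
		Code:     http.StatusBadRequest,
		Failures: failures,
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(body.Code)
	_ = json.NewEncoder(w).Encode(body)
}

func main() {
	http.HandleFunc("/example", func(w http.ResponseWriter, r *http.Request) {
		rejectBatch(w, []*indexedFailure{{Index: 0, Message: "Incorrect attestation signature"}})
	})
	_ = http.ListenAndServe("localhost:8080", nil)
}
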
@@ -1,13 +1,29 @@
|
||||
package shared
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
|
||||
)
|
||||
|
||||
func UintFromQuery(w http.ResponseWriter, r *http.Request, name string) (bool, string, uint64) {
|
||||
raw := r.URL.Query().Get(name)
|
||||
if raw != "" {
|
||||
v, valid := ValidateUint(w, name, raw)
|
||||
if !valid {
|
||||
return false, "", 0
|
||||
}
|
||||
return true, raw, v
|
||||
}
|
||||
return true, "", 0
|
||||
}
|
||||
|
||||
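
Note the tri-state contract of UintFromQuery above: ok is false only when the parameter was present but unparsable (the helper has already written the error response), while an absent parameter returns ok=true with an empty raw string. A minimal sketch of the calling pattern (the handler and route are hypothetical):

package main

import (
	"fmt"
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
)

// handler demonstrates how callers must branch on the raw string to tell
// "parameter not provided" apart from "parameter provided and valid".
func handler(w http.ResponseWriter, r *http.Request) {
	ok, rawSlot, slot := shared.UintFromQuery(w, r, "slot")
	if !ok {
		return // error response already written by the helper
	}
	if rawSlot == "" {
		fmt.Fprintln(w, "no slot filter supplied")
		return
	}
	fmt.Fprintf(w, "filtering by slot %d\n", slot)
}

func main() {
	http.HandleFunc("/example", handler)
	_ = http.ListenAndServe("localhost:8080", nil)
}
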
func ValidateHex(w http.ResponseWriter, name string, s string) bool {
|
||||
if s == "" {
|
||||
errJson := &http2.DefaultErrorJson{
|
||||
@@ -48,3 +64,73 @@ func ValidateUint(w http.ResponseWriter, name string, s string) (uint64, bool) {
|
||||
}
|
||||
return v, true
|
||||
}
|
||||
|
||||
// IsSyncing checks whether the beacon node is currently syncing and writes out the sync status.
|
||||
func IsSyncing(
|
||||
ctx context.Context,
|
||||
w http.ResponseWriter,
|
||||
syncChecker sync.Checker,
|
||||
headFetcher blockchain.HeadFetcher,
|
||||
timeFetcher blockchain.TimeFetcher,
|
||||
optimisticModeFetcher blockchain.OptimisticModeFetcher,
|
||||
) bool {
|
||||
if !syncChecker.Syncing() {
|
||||
return false
|
||||
}
|
||||
|
||||
headSlot := headFetcher.HeadSlot()
|
||||
isOptimistic, err := optimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
errJson := &http2.DefaultErrorJson{
|
||||
Message: "Could not check optimistic status: " + err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
http2.WriteError(w, errJson)
|
||||
return true
|
||||
}
|
||||
syncDetails := &SyncDetailsContainer{
|
||||
Data: &SyncDetails{
|
||||
HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
|
||||
SyncDistance: strconv.FormatUint(uint64(timeFetcher.CurrentSlot()-headSlot), 10),
|
||||
IsSyncing: true,
|
||||
IsOptimistic: isOptimistic,
|
||||
},
|
||||
}
|
||||
|
||||
msg := "Beacon node is currently syncing and not serving request on that endpoint"
|
||||
details, err := json.Marshal(syncDetails)
|
||||
if err == nil {
|
||||
msg += " Details: " + string(details)
|
||||
}
|
||||
errJson := &http2.DefaultErrorJson{
|
||||
Message: msg,
|
||||
Code: http.StatusServiceUnavailable}
|
||||
http2.WriteError(w, errJson)
|
||||
return true
|
||||
}
|
||||
|
||||
// IsOptimistic checks whether the beacon node is currently optimistic and writes it to the response.
|
||||
func IsOptimistic(
|
||||
ctx context.Context,
|
||||
w http.ResponseWriter,
|
||||
optimisticModeFetcher blockchain.OptimisticModeFetcher,
|
||||
) (bool, error) {
|
||||
isOptimistic, err := optimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
errJson := &http2.DefaultErrorJson{
|
||||
Message: "Could not check optimistic status: " + err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
http2.WriteError(w, errJson)
|
||||
return true, err
|
||||
}
|
||||
if !isOptimistic {
|
||||
return false, nil
|
||||
}
|
||||
errJson := &http2.DefaultErrorJson{
|
||||
Code: http.StatusServiceUnavailable,
|
||||
Message: "Beacon node is currently optimistic and not serving validators",
|
||||
}
|
||||
http2.WriteError(w, errJson)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
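
The validator handlers later in this diff call these guards at the top of each HTTP endpoint. A compilable sketch of that pattern, assuming only the parameter types shown in the signatures above:

package example

import (
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)

// server carries just the dependencies the guard helpers above need.
type server struct {
	SyncChecker           sync.Checker
	HeadFetcher           blockchain.HeadFetcher
	TimeFetcher           blockchain.TimeFetcher
	OptimisticModeFetcher blockchain.OptimisticModeFetcher
}

// handle shows the guard pattern used by the validator handlers later in this
// diff: bail out early while syncing (IsSyncing has already written the 503),
// then optionally refuse optimistic heads the same way.
func (s *server) handle(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
		return
	}
	if optimistic, err := shared.IsOptimistic(ctx, w, s.OptimisticModeFetcher); err != nil || optimistic {
		return
	}
	w.WriteHeader(http.StatusOK)
}
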
@@ -1,10 +1,13 @@
|
||||
package shared
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -57,6 +60,81 @@ type AggregateAttestationAndProof struct {
|
||||
SelectionProof string `json:"selection_proof" validate:"required,hexadecimal"`
|
||||
}
|
||||
|
||||
type SyncCommitteeSubscription struct {
|
||||
ValidatorIndex string `json:"validator_index" validate:"required,number,gte=0"`
|
||||
SyncCommitteeIndices []string `json:"sync_committee_indices" validate:"required,dive,number,gte=0"`
|
||||
UntilEpoch string `json:"until_epoch" validate:"required,number,gte=0"`
|
||||
}
|
||||
|
||||
type BeaconCommitteeSubscription struct {
|
||||
ValidatorIndex string `json:"validator_index" validate:"required,number,gte=0"`
|
||||
CommitteeIndex string `json:"committee_index" validate:"required,number,gte=0"`
|
||||
CommitteesAtSlot string `json:"committees_at_slot" validate:"required,number,gte=0"`
|
||||
Slot string `json:"slot" validate:"required,number,gte=0"`
|
||||
IsAggregator bool `json:"is_aggregator"`
|
||||
}
|
||||
|
||||
type ValidatorRegistration struct {
|
||||
FeeRecipient string `json:"fee_recipient" validate:"required,hexadecimal"`
|
||||
GasLimit string `json:"gas_limit" validate:"required,number,gte=0"`
|
||||
Timestamp string `json:"timestamp" validate:"required,number,gte=0"`
|
||||
Pubkey string `json:"pubkey" validate:"required,hexadecimal"`
|
||||
}
|
||||
|
||||
type SignedValidatorRegistration struct {
|
||||
Message *ValidatorRegistration `json:"message" validate:"required,dive"`
|
||||
Signature string `json:"signature" validate:"required,hexadecimal"`
|
||||
}
|
||||
|
||||
func (s *SignedValidatorRegistration) ToConsensus() (*eth.SignedValidatorRegistrationV1, error) {
|
||||
msg, err := s.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "Message")
|
||||
}
|
||||
sig, err := hexutil.Decode(s.Signature)
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "Signature")
|
||||
}
|
||||
if len(sig) != fieldparams.BLSSignatureLength {
|
||||
return nil, fmt.Errorf("Signature length was %d when expecting length %d", len(sig), fieldparams.BLSSignatureLength)
|
||||
}
|
||||
return &eth.SignedValidatorRegistrationV1{
|
||||
Message: msg,
|
||||
Signature: sig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *ValidatorRegistration) ToConsensus() (*eth.ValidatorRegistrationV1, error) {
|
||||
feeRecipient, err := hexutil.Decode(s.FeeRecipient)
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "FeeRecipient")
|
||||
}
|
||||
if len(feeRecipient) != fieldparams.FeeRecipientLength {
|
||||
return nil, fmt.Errorf("feeRecipient length was %d when expecting length %d", len(feeRecipient), fieldparams.FeeRecipientLength)
|
||||
}
|
||||
pubKey, err := hexutil.Decode(s.Pubkey)
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "FeeRecipient")
|
||||
}
|
||||
if len(pubKey) != fieldparams.BLSPubkeyLength {
|
||||
return nil, fmt.Errorf("FeeRecipient length was %d when expecting length %d", len(pubKey), fieldparams.BLSPubkeyLength)
|
||||
}
|
||||
gasLimit, err := strconv.ParseUint(s.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "GasLimit")
|
||||
}
|
||||
timestamp, err := strconv.ParseUint(s.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "Timestamp")
|
||||
}
|
||||
return &eth.ValidatorRegistrationV1{
|
||||
FeeRecipient: feeRecipient,
|
||||
GasLimit: gasLimit,
|
||||
Timestamp: timestamp,
|
||||
Pubkey: pubKey,
|
||||
}, nil
|
||||
}
|
||||
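
A sketch of how a handler might use these converters on a decoded request body; the registrationsFromRequest helper is hypothetical, but the types and ToConsensus signatures are the ones defined above:

package example

import (
	"encoding/json"
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// registrationsFromRequest decodes a JSON array of signed validator
// registrations (string-encoded fields, as in the structs above) and converts
// each one to its consensus representation via ToConsensus.
func registrationsFromRequest(r *http.Request) ([]*eth.SignedValidatorRegistrationV1, error) {
	var regs []*shared.SignedValidatorRegistration
	if err := json.NewDecoder(r.Body).Decode(&regs); err != nil {
		return nil, err
	}
	out := make([]*eth.SignedValidatorRegistrationV1, len(regs))
	for i, reg := range regs {
		consensus, err := reg.ToConsensus() // validates hex fields and lengths
		if err != nil {
			return nil, err
		}
		out[i] = consensus
	}
	return out, nil
}
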
|
||||
func (s *SignedContributionAndProof) ToConsensus() (*eth.SignedContributionAndProof, error) {
|
||||
msg, err := s.Message.ToConsensus()
|
||||
if err != nil {
|
||||
@@ -182,6 +260,14 @@ func (a *Attestation) ToConsensus() (*eth.Attestation, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func AttestationFromConsensus(a *eth.Attestation) *Attestation {
|
||||
return &Attestation{
|
||||
AggregationBits: hexutil.Encode(a.AggregationBits),
|
||||
Data: AttestationDataFromConsensus(a.Data),
|
||||
Signature: hexutil.Encode(a.Signature),
|
||||
}
|
||||
}
|
||||
|
||||
func (a *AttestationData) ToConsensus() (*eth.AttestationData, error) {
|
||||
slot, err := strconv.ParseUint(a.Slot, 10, 64)
|
||||
if err != nil {
|
||||
@@ -213,6 +299,16 @@ func (a *AttestationData) ToConsensus() (*eth.AttestationData, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func AttestationDataFromConsensus(a *eth.AttestationData) *AttestationData {
|
||||
return &AttestationData{
|
||||
Slot: strconv.FormatUint(uint64(a.Slot), 10),
|
||||
CommitteeIndex: strconv.FormatUint(uint64(a.CommitteeIndex), 10),
|
||||
BeaconBlockRoot: hexutil.Encode(a.BeaconBlockRoot),
|
||||
Source: CheckpointFromConsensus(a.Source),
|
||||
Target: CheckpointFromConsensus(a.Target),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Checkpoint) ToConsensus() (*eth.Checkpoint, error) {
|
||||
epoch, err := strconv.ParseUint(c.Epoch, 10, 64)
|
||||
if err != nil {
|
||||
@@ -228,3 +324,75 @@ func (c *Checkpoint) ToConsensus() (*eth.Checkpoint, error) {
|
||||
Root: root,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func CheckpointFromConsensus(c *eth.Checkpoint) *Checkpoint {
|
||||
return &Checkpoint{
|
||||
Epoch: strconv.FormatUint(uint64(c.Epoch), 10),
|
||||
Root: hexutil.Encode(c.Root),
|
||||
}
|
||||
}
|
||||
|
||||
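
The converters added above are symmetric. A short sketch of the round trip between the consensus and API representations (the roundTrip helper is illustrative only):

package example

import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// roundTrip shows the symmetry added above: consensus attestations are turned
// into the string-based API representation with AttestationFromConsensus, and
// API payloads are parsed back with ToConsensus.
func roundTrip(att *eth.Attestation) (*eth.Attestation, error) {
	apiAtt := shared.AttestationFromConsensus(att) // hex/decimal string fields
	return apiAtt.ToConsensus()                    // parses and validates them again
}
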
func (s *SyncCommitteeSubscription) ToConsensus() (*validator.SyncCommitteeSubscription, error) {
|
||||
index, err := strconv.ParseUint(s.ValidatorIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "ValidatorIndex")
|
||||
}
|
||||
scIndices := make([]uint64, len(s.SyncCommitteeIndices))
|
||||
for i, ix := range s.SyncCommitteeIndices {
|
||||
scIndices[i], err = strconv.ParseUint(ix, 10, 64)
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, fmt.Sprintf("SyncCommitteeIndices[%d]", i))
|
||||
}
|
||||
}
|
||||
epoch, err := strconv.ParseUint(s.UntilEpoch, 10, 64)
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "UntilEpoch")
|
||||
}
|
||||
|
||||
return &validator.SyncCommitteeSubscription{
|
||||
ValidatorIndex: primitives.ValidatorIndex(index),
|
||||
SyncCommitteeIndices: scIndices,
|
||||
UntilEpoch: primitives.Epoch(epoch),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *BeaconCommitteeSubscription) ToConsensus() (*validator.BeaconCommitteeSubscription, error) {
|
||||
valIndex, err := strconv.ParseUint(b.ValidatorIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "ValidatorIndex")
|
||||
}
|
||||
committeeIndex, err := strconv.ParseUint(b.CommitteeIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "CommitteeIndex")
|
||||
}
|
||||
committeesAtSlot, err := strconv.ParseUint(b.CommitteesAtSlot, 10, 64)
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "CommitteesAtSlot")
|
||||
}
|
||||
slot, err := strconv.ParseUint(b.Slot, 10, 64)
|
||||
if err != nil {
|
||||
return nil, NewDecodeError(err, "Slot")
|
||||
}
|
||||
|
||||
return &validator.BeaconCommitteeSubscription{
|
||||
ValidatorIndex: primitives.ValidatorIndex(valIndex),
|
||||
CommitteeIndex: primitives.CommitteeIndex(committeeIndex),
|
||||
CommitteesAtSlot: committeesAtSlot,
|
||||
Slot: primitives.Slot(slot),
|
||||
IsAggregator: b.IsAggregator,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SyncDetails contains information about node sync status.
|
||||
type SyncDetails struct {
|
||||
HeadSlot string `json:"head_slot"`
|
||||
SyncDistance string `json:"sync_distance"`
|
||||
IsSyncing bool `json:"is_syncing"`
|
||||
IsOptimistic bool `json:"is_optimistic"`
|
||||
ElOffline bool `json:"el_offline"`
|
||||
}
|
||||
|
||||
// SyncDetailsContainer is a wrapper for Data.
|
||||
type SyncDetailsContainer struct {
|
||||
Data *SyncDetails `json:"data"`
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ go_library(
|
||||
"validator.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/validator",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
@@ -31,6 +31,7 @@ go_library(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
@@ -43,7 +44,6 @@ go_library(
|
||||
"@com_github_go_playground_validator_v10//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
@@ -62,14 +62,19 @@ go_test(
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/builder/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/sync/initial-sync/testing:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -88,7 +93,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_golang_mock//gomock:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -2,21 +2,38 @@ package validator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/go-playground/validator/v10"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
validator2 "github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
|
||||
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
|
||||
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// GetAggregateAttestation aggregates all attestations matching the given attestation data root and slot, returning the aggregated result.
|
||||
func (s *Server) GetAggregateAttestation(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.GetAggregateAttestation")
|
||||
defer span.End()
|
||||
|
||||
attDataRoot := r.URL.Query().Get("attestation_data_root")
|
||||
valid := shared.ValidateHex(w, "Attestation data root", attDataRoot)
|
||||
if !valid {
|
||||
@@ -28,7 +45,7 @@ func (s *Server) GetAggregateAttestation(w http.ResponseWriter, r *http.Request)
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.AttestationsPool.AggregateUnaggregatedAttestations(r.Context()); err != nil {
|
||||
if err := s.AttestationsPool.AggregateUnaggregatedAttestations(ctx); err != nil {
|
||||
http2.HandleError(w, "Could not aggregate unaggregated attestations: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
@@ -82,17 +99,22 @@ func (s *Server) GetAggregateAttestation(w http.ResponseWriter, r *http.Request)
|
||||
|
||||
// SubmitContributionAndProofs publishes multiple signed sync committee contribution and proofs.
|
||||
func (s *Server) SubmitContributionAndProofs(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.SubmitContributionAndProofs")
|
||||
defer span.End()
|
||||
|
||||
if r.Body == http.NoBody {
|
||||
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var req SubmitContributionAndProofsRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req.Data); err != nil {
|
||||
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if len(req.Data) == 0 {
|
||||
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
validate := validator.New()
|
||||
if err := validate.Struct(req); err != nil {
|
||||
http2.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
@@ -105,7 +127,7 @@ func (s *Server) SubmitContributionAndProofs(w http.ResponseWriter, r *http.Requ
|
||||
http2.HandleError(w, "Could not convert request contribution to consensus contribution: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
rpcError := core.SubmitSignedContributionAndProof(r.Context(), consensusItem, s.Broadcaster, s.SyncCommitteePool, s.OperationNotifier)
|
||||
rpcError := s.CoreService.SubmitSignedContributionAndProof(ctx, consensusItem)
|
||||
if rpcError != nil {
|
||||
http2.HandleError(w, rpcError.Err.Error(), core.ErrorReasonToHTTP(rpcError.Reason))
|
||||
}
|
||||
@@ -114,25 +136,28 @@ func (s *Server) SubmitContributionAndProofs(w http.ResponseWriter, r *http.Requ
|
||||
|
||||
// SubmitAggregateAndProofs verifies given aggregate and proofs and publishes them on appropriate gossipsub topic.
|
||||
func (s *Server) SubmitAggregateAndProofs(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.SubmitAggregateAndProofs")
|
||||
defer span.End()
|
||||
|
||||
if r.Body == http.NoBody {
|
||||
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var req SubmitAggregateAndProofsRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req.Data); err != nil {
|
||||
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if len(req.Data) == 0 {
|
||||
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
validate := validator.New()
|
||||
if err := validate.Struct(req); err != nil {
|
||||
http2.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
genesisTime := s.TimeFetcher.GenesisTime()
|
||||
|
||||
broadcastFailed := false
|
||||
for _, item := range req.Data {
|
||||
consensusItem, err := item.ToConsensus()
|
||||
@@ -140,11 +165,9 @@ func (s *Server) SubmitAggregateAndProofs(w http.ResponseWriter, r *http.Request
|
||||
http2.HandleError(w, "Could not convert request aggregate to consensus aggregate: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
rpcError := core.SubmitSignedAggregateSelectionProof(
|
||||
r.Context(),
|
||||
rpcError := s.CoreService.SubmitSignedAggregateSelectionProof(
|
||||
ctx,
|
||||
&ethpbalpha.SignedAggregateSubmitRequest{SignedAggregateAndProof: consensusItem},
|
||||
genesisTime,
|
||||
s.Broadcaster,
|
||||
)
|
||||
if rpcError != nil {
|
||||
_, ok := rpcError.Err.(*core.AggregateBroadcastFailedError)
|
||||
@@ -161,3 +184,390 @@ func (s *Server) SubmitAggregateAndProofs(w http.ResponseWriter, r *http.Request
|
||||
http2.HandleError(w, "Could not broadcast one or more signed aggregated attestations", http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
|
||||
// SubmitSyncCommitteeSubscription subscribe to a number of sync committee subnets.
|
||||
//
|
||||
// Subscribing to sync committee subnets is an action performed by VC to enable
|
||||
// network participation, and only required if the VC has an active
|
||||
// validator in an active sync committee.
|
||||
func (s *Server) SubmitSyncCommitteeSubscription(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.SubmitSyncCommitteeSubscription")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
if r.Body == http.NoBody {
|
||||
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
var req SubmitSyncCommitteeSubscriptionsRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req.Data); err != nil {
|
||||
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if len(req.Data) == 0 {
|
||||
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
validate := validator.New()
|
||||
if err := validate.Struct(req); err != nil {
|
||||
http2.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
st, err := s.HeadFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
currEpoch := slots.ToEpoch(st.Slot())
|
||||
validators := make([]state.ReadOnlyValidator, len(req.Data))
|
||||
subscriptions := make([]*validator2.SyncCommitteeSubscription, len(req.Data))
|
||||
for i, item := range req.Data {
|
||||
consensusItem, err := item.ToConsensus()
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not convert request subscription to consensus subscription: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
subscriptions[i] = consensusItem
|
||||
val, err := st.ValidatorAtIndexReadOnly(consensusItem.ValidatorIndex)
|
||||
if err != nil {
|
||||
http2.HandleError(
|
||||
w,
|
||||
fmt.Sprintf("Could not get validator at index %d: %s", consensusItem.ValidatorIndex, err.Error()),
|
||||
http.StatusInternalServerError,
|
||||
)
|
||||
return
|
||||
}
|
||||
valStatus, err := rpchelpers.ValidatorSubStatus(val, currEpoch)
|
||||
if err != nil {
|
||||
http2.HandleError(
|
||||
w,
|
||||
fmt.Sprintf("Could not get validator status at index %d: %s", consensusItem.ValidatorIndex, err.Error()),
|
||||
http.StatusInternalServerError,
|
||||
)
|
||||
return
|
||||
}
|
||||
if valStatus != validator2.ActiveOngoing && valStatus != validator2.ActiveExiting {
|
||||
http2.HandleError(
|
||||
w,
|
||||
fmt.Sprintf("Validator at index %d is not active or exiting", consensusItem.ValidatorIndex),
|
||||
http.StatusBadRequest,
|
||||
)
|
||||
return
|
||||
}
|
||||
validators[i] = val
|
||||
}
|
||||
|
||||
startEpoch, err := slots.SyncCommitteePeriodStartEpoch(currEpoch)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not get sync committee period start epoch: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
for i, sub := range subscriptions {
|
||||
if sub.UntilEpoch <= currEpoch {
|
||||
http2.HandleError(
|
||||
w,
|
||||
fmt.Sprintf("Epoch for subscription at index %d is in the past. It must be at least %d", i, currEpoch+1),
|
||||
http.StatusBadRequest,
|
||||
)
|
||||
return
|
||||
}
|
||||
maxValidUntilEpoch := startEpoch + params.BeaconConfig().EpochsPerSyncCommitteePeriod*2
|
||||
if sub.UntilEpoch > maxValidUntilEpoch {
|
||||
http2.HandleError(
|
||||
w,
|
||||
fmt.Sprintf("Epoch for subscription at index %d is too far in the future. It can be at most %d", i, maxValidUntilEpoch),
|
||||
http.StatusBadRequest,
|
||||
)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for i, sub := range subscriptions {
|
||||
pubkey48 := validators[i].PublicKey()
|
||||
// Handle overflow in the event current epoch is less than end epoch.
|
||||
// This is an impossible condition, so it is a defensive check.
|
||||
epochsToWatch, err := sub.UntilEpoch.SafeSub(uint64(startEpoch))
|
||||
if err != nil {
|
||||
epochsToWatch = 0
|
||||
}
|
||||
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second
|
||||
totalDuration := epochDuration * time.Duration(epochsToWatch)
|
||||
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets(pubkey48[:], startEpoch, sub.SyncCommitteeIndices, totalDuration)
|
||||
}
|
||||
}
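For reference, a minimal standalone sketch of the epoch and duration arithmetic used by this handler, with mainnet-style constants hard-coded as assumptions (32 slots per epoch, 12-second slots, 256-epoch sync committee periods); the handler itself reads these from params.BeaconConfig():

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		slotsPerEpoch                = 32  // assumed mainnet value
		secondsPerSlot               = 12  // assumed mainnet value
		epochsPerSyncCommitteePeriod = 256 // assumed mainnet value
	)

	currEpoch := uint64(300)
	// Start of the current sync committee period, as SyncCommitteePeriodStartEpoch would return.
	startEpoch := (currEpoch / epochsPerSyncCommitteePeriod) * epochsPerSyncCommitteePeriod // 256
	// Subscriptions may run at most to the end of the next period.
	maxValidUntilEpoch := startEpoch + epochsPerSyncCommitteePeriod*2 // 768

	untilEpoch := uint64(512)
	epochsToWatch := untilEpoch - startEpoch // the handler uses SafeSub and falls back to 0 on underflow

	epochDuration := time.Duration(slotsPerEpoch*secondsPerSlot) * time.Second // 384s per epoch
	totalDuration := epochDuration * time.Duration(epochsToWatch)

	fmt.Println(startEpoch, maxValidUntilEpoch, totalDuration) // 256 768 27h18m24s
}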

// SubmitBeaconCommitteeSubscription searches using discv5 for peers related to the provided subnet information
// and replaces current peers with those ones if necessary.
func (s *Server) SubmitBeaconCommitteeSubscription(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.SubmitBeaconCommitteeSubscription")
defer span.End()

if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}

if r.Body == http.NoBody {
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
var req SubmitBeaconCommitteeSubscriptionsRequest
if err := json.NewDecoder(r.Body).Decode(&req.Data); err != nil {
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
if len(req.Data) == 0 {
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
validate := validator.New()
if err := validate.Struct(req); err != nil {
http2.HandleError(w, err.Error(), http.StatusBadRequest)
return
}

st, err := s.HeadFetcher.HeadStateReadOnly(ctx)
if err != nil {
http2.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
return
}

// Verify validators at the beginning to return early if request is invalid.
validators := make([]state.ReadOnlyValidator, len(req.Data))
subscriptions := make([]*validator2.BeaconCommitteeSubscription, len(req.Data))
for i, item := range req.Data {
consensusItem, err := item.ToConsensus()
if err != nil {
http2.HandleError(w, "Could not convert request subscription to consensus subscription: "+err.Error(), http.StatusBadRequest)
return
}
subscriptions[i] = consensusItem
val, err := st.ValidatorAtIndexReadOnly(consensusItem.ValidatorIndex)
if err != nil {
if outOfRangeErr, ok := err.(*state_native.ValidatorIndexOutOfRangeError); ok {
http2.HandleError(w, "Could not get validator: "+outOfRangeErr.Error(), http.StatusBadRequest)
return
}
http2.HandleError(w, "Could not get validator: "+err.Error(), http.StatusInternalServerError)
return
}
validators[i] = val
}

fetchValsLen := func(slot primitives.Slot) (uint64, error) {
wantedEpoch := slots.ToEpoch(slot)
vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
if err != nil {
return 0, err
}
return uint64(len(vals)), nil
}

// Request the head validator indices of epoch represented by the first requested slot.
currValsLen, err := fetchValsLen(subscriptions[0].Slot)
if err != nil {
http2.HandleError(w, "Could not retrieve head validator length: "+err.Error(), http.StatusInternalServerError)
return
}
currEpoch := slots.ToEpoch(subscriptions[0].Slot)
for _, sub := range subscriptions {
// If epoch has changed, re-request active validators length
if currEpoch != slots.ToEpoch(sub.Slot) {
currValsLen, err = fetchValsLen(sub.Slot)
if err != nil {
http2.HandleError(w, "Could not retrieve head validator length: "+err.Error(), http.StatusInternalServerError)
return
}
currEpoch = slots.ToEpoch(sub.Slot)
}
subnet := helpers.ComputeSubnetFromCommitteeAndSlot(currValsLen, sub.CommitteeIndex, sub.Slot)
cache.SubnetIDs.AddAttesterSubnetID(sub.Slot, subnet)
if sub.IsAggregator {
cache.SubnetIDs.AddAggregatorSubnetID(sub.Slot, subnet)
}
}
for _, val := range validators {
valStatus, err := rpchelpers.ValidatorStatus(val, currEpoch)
if err != nil {
http2.HandleError(w, "Could not retrieve validator status: "+err.Error(), http.StatusInternalServerError)
return
}
pubkey := val.PublicKey()
core.AssignValidatorToSubnet(pubkey[:], valStatus)
}
}
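For context, a self-contained sketch of the subnet derivation that helpers.ComputeSubnetFromCommitteeAndSlot performs, following the consensus-spec compute_subnet_for_attestation formula; this is an approximation with assumed mainnet constants, not Prysm's helper itself:

package main

import "fmt"

const (
	slotsPerEpoch          = 32  // assumed mainnet value
	maxCommitteesPerSlot   = 64  // assumed mainnet value
	targetCommitteeSize    = 128 // assumed mainnet value
	attestationSubnetCount = 64  // assumed mainnet value
)

// committeesPerSlot mirrors the spec's get_committee_count_per_slot:
// clamp(active_validators / SLOTS_PER_EPOCH / TARGET_COMMITTEE_SIZE, 1, MAX_COMMITTEES_PER_SLOT).
func committeesPerSlot(activeValidatorCount uint64) uint64 {
	c := activeValidatorCount / slotsPerEpoch / targetCommitteeSize
	if c > maxCommitteesPerSlot {
		c = maxCommitteesPerSlot
	}
	if c == 0 {
		c = 1
	}
	return c
}

// computeSubnet maps (active validator count, committee index, slot) to an attestation subnet ID.
func computeSubnet(activeValidatorCount, committeeIndex, slot uint64) uint64 {
	slotsSinceEpochStart := slot % slotsPerEpoch
	committeesSinceEpochStart := committeesPerSlot(activeValidatorCount) * slotsSinceEpochStart
	return (committeesSinceEpochStart + committeeIndex) % attestationSubnetCount
}

func main() {
	// 500,000 active validators, slot 5, committee 3: 64 committees per slot,
	// so (64*5 + 3) % 64 = 3.
	fmt.Println(computeSubnet(500000, 3, 5)) // 3
}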

// GetAttestationData requests that the beacon node produces attestation data for
// the requested committee index and slot based on the nodes current head.
func (s *Server) GetAttestationData(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.GetAttestationData")
defer span.End()

if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}

if isOptimistic, err := shared.IsOptimistic(ctx, w, s.OptimisticModeFetcher); isOptimistic || err != nil {
return
}

rawSlot := r.URL.Query().Get("slot")
slot, valid := shared.ValidateUint(w, "Slot", rawSlot)
if !valid {
return
}
rawCommitteeIndex := r.URL.Query().Get("committee_index")
committeeIndex, valid := shared.ValidateUint(w, "Committee Index", rawCommitteeIndex)
if !valid {
return
}

attestationData, rpcError := s.CoreService.GetAttestationData(ctx, &ethpbalpha.AttestationDataRequest{
Slot: primitives.Slot(slot),
CommitteeIndex: primitives.CommitteeIndex(committeeIndex),
})

if rpcError != nil {
http2.HandleError(w, rpcError.Err.Error(), core.ErrorReasonToHTTP(rpcError.Reason))
return
}

response := &GetAttestationDataResponse{
Data: &shared.AttestationData{
Slot: strconv.FormatUint(uint64(attestationData.Slot), 10),
CommitteeIndex: strconv.FormatUint(uint64(attestationData.CommitteeIndex), 10),
BeaconBlockRoot: hexutil.Encode(attestationData.BeaconBlockRoot),
Source: &shared.Checkpoint{
Epoch: strconv.FormatUint(uint64(attestationData.Source.Epoch), 10),
Root: hexutil.Encode(attestationData.Source.Root),
},
Target: &shared.Checkpoint{
Epoch: strconv.FormatUint(uint64(attestationData.Target.Epoch), 10),
Root: hexutil.Encode(attestationData.Target.Root),
},
},
}
http2.WriteJson(w, response)
}
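A hedged usage sketch for this handler: it assumes a beacon node exposing the standard Beacon API attestation_data route locally (the 127.0.0.1:3500 address is an assumption, not something stated by this diff) and simply prints the JSON body that GetAttestationDataResponse serializes to:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// slot and committee_index are the two query parameters validated by the handler above.
	url := "http://127.0.0.1:3500/eth/v1/validator/attestation_data?slot=123&committee_index=0"
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The JSON fields mirror shared.AttestationData: slot, committee index,
	// beacon block root, and source/target checkpoints as decimal strings and hex roots.
	fmt.Println(resp.Status, string(body))
}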

// ProduceSyncCommitteeContribution requests that the beacon node produce a sync committee contribution.
func (s *Server) ProduceSyncCommitteeContribution(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.ProduceSyncCommitteeContribution")
defer span.End()

subIndex := r.URL.Query().Get("subcommittee_index")
index, valid := shared.ValidateUint(w, "Subcommittee Index", subIndex)
if !valid {
return
}
rawSlot := r.URL.Query().Get("slot")
slot, valid := shared.ValidateUint(w, "Slot", rawSlot)
if !valid {
return
}
rawBlockRoot := r.URL.Query().Get("beacon_block_root")
blockRoot, err := hexutil.Decode(rawBlockRoot)
if err != nil {
http2.HandleError(w, "Invalid Beacon Block Root: "+err.Error(), http.StatusBadRequest)
return
}
contribution, ok := s.produceSyncCommitteeContribution(ctx, w, primitives.Slot(slot), index, []byte(blockRoot))
if !ok {
return
}
response := &ProduceSyncCommitteeContributionResponse{
Data: contribution,
}
http2.WriteJson(w, response)
}

// ProduceSyncCommitteeContribution requests that the beacon node produce a sync committee contribution.
func (s *Server) produceSyncCommitteeContribution(
ctx context.Context,
w http.ResponseWriter,
slot primitives.Slot,
index uint64,
blockRoot []byte,
) (*shared.SyncCommitteeContribution, bool) {
msgs, err := s.SyncCommitteePool.SyncCommitteeMessages(slot)
if err != nil {
http2.HandleError(w, "Could not get sync subcommittee messages: "+err.Error(), http.StatusInternalServerError)
return nil, false
}
if len(msgs) == 0 {
http2.HandleError(w, "No subcommittee messages found", http.StatusNotFound)
return nil, false
}
sig, aggregatedBits, err := s.CoreService.AggregatedSigAndAggregationBits(
ctx,
&ethpbalpha.AggregatedSigAndAggregationBitsRequest{
Msgs: msgs,
Slot: slot,
SubnetId: index,
BlockRoot: blockRoot,
},
)
if err != nil {
http2.HandleError(w, "Could not get contribution data: "+err.Error(), http.StatusInternalServerError)
return nil, false
}

return &shared.SyncCommitteeContribution{
Slot: strconv.FormatUint(uint64(slot), 10),
BeaconBlockRoot: hexutil.Encode(blockRoot),
SubcommitteeIndex: strconv.FormatUint(index, 10),
AggregationBits: hexutil.Encode(aggregatedBits),
Signature: hexutil.Encode(sig),
}, true
}

// RegisterValidator requests that the beacon node stores valid validator registrations and calls the builder apis to update the custom builder
func (s *Server) RegisterValidator(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.RegisterValidators")
defer span.End()

if s.BlockBuilder == nil || !s.BlockBuilder.Configured() {
http2.HandleError(w, fmt.Sprintf("Could not register block builder: %v", builder.ErrNoBuilder), http.StatusBadRequest)
return
}

var jsonRegistrations []*shared.SignedValidatorRegistration
err := json.NewDecoder(r.Body).Decode(&jsonRegistrations)
switch {
case err == io.EOF:
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
case err != nil:
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}

validate := validator.New()
registrations := make([]*ethpbalpha.SignedValidatorRegistrationV1, len(jsonRegistrations))
for i, registration := range jsonRegistrations {
if err := validate.Struct(registration); err != nil {
http2.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
reg, err := registration.ToConsensus()
if err != nil {
http2.HandleError(w, err.Error(), http.StatusBadRequest)
return
}

registrations[i] = reg
}
if len(registrations) == 0 {
http2.HandleError(w, "Validator registration request is empty", http.StatusBadRequest)
return
}
if err := s.BlockBuilder.RegisterValidator(ctx, registrations); err != nil {
http2.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
}

File diff suppressed because it is too large
@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -32,4 +33,5 @@ type Server struct {
BeaconDB db.HeadAccessDatabase
BlockBuilder builder.BlockBuilder
OperationNotifier operation.Notifier
CoreService *core.Service
}

@@ -3,13 +3,29 @@ package validator
import "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"

type AggregateAttestationResponse struct {
Data *shared.Attestation `json:"data" validate:"required"`
Data *shared.Attestation `json:"data"`
}

type SubmitContributionAndProofsRequest struct {
Data []*shared.SignedContributionAndProof `json:"data" validate:"required"`
Data []*shared.SignedContributionAndProof `json:"data" validate:"required,dive"`
}

type SubmitAggregateAndProofsRequest struct {
Data []*shared.SignedAggregateAttestationAndProof `json:"data" validate:"required"`
Data []*shared.SignedAggregateAttestationAndProof `json:"data" validate:"required,dive"`
}

type SubmitSyncCommitteeSubscriptionsRequest struct {
Data []*shared.SyncCommitteeSubscription `json:"data" validate:"required,dive"`
}

type SubmitBeaconCommitteeSubscriptionsRequest struct {
Data []*shared.BeaconCommitteeSubscription `json:"data" validate:"required,dive"`
}

type GetAttestationDataResponse struct {
Data *shared.AttestationData `json:"data"`
}

type ProduceSyncCommitteeContributionResponse struct {
Data *shared.SyncCommitteeContribution `json:"data"`
}
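A small standalone sketch of why the request structs above move from `validate:"required"` to `validate:"required,dive"`: with dive, the validator descends into the slice and applies each element's own tags, so a request whose slice is present but contains an invalid element is rejected. This assumes go-playground/validator/v10 semantics; the field names below are illustrative only:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type item struct {
	ValidatorIndex string `json:"validator_index" validate:"required"`
}

type request struct {
	Data []*item `json:"data" validate:"required,dive"`
}

func main() {
	v := validator.New()

	ok := request{Data: []*item{{ValidatorIndex: "1"}}}
	fmt.Println(v.Struct(ok)) // <nil>: slice present and every element passes its own tags

	bad := request{Data: []*item{{}}} // element present, but its required field is empty
	fmt.Println(v.Struct(bad))        // validation error naming Data[0].ValidatorIndex
}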

@@ -6,19 +6,14 @@ import (
"fmt"
"sort"
"strconv"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -751,249 +746,6 @@ func (vs *Server) PrepareBeaconProposer(
return &emptypb.Empty{}, nil
}

// SubmitValidatorRegistration submits validator registrations.
func (vs *Server) SubmitValidatorRegistration(ctx context.Context, reg *ethpbv1.SubmitValidatorRegistrationsRequest) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "validator.SubmitValidatorRegistration")
defer span.End()

if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not register block builder: %v", builder.ErrNoBuilder)
}
var registrations []*ethpbalpha.SignedValidatorRegistrationV1
for i, registration := range reg.Registrations {
message := reg.Registrations[i].Message
registrations = append(registrations, &ethpbalpha.SignedValidatorRegistrationV1{
Message: &ethpbalpha.ValidatorRegistrationV1{
FeeRecipient: message.FeeRecipient,
GasLimit: message.GasLimit,
Timestamp: message.Timestamp,
Pubkey: message.Pubkey,
},
Signature: registration.Signature,
})
}
if len(registrations) == 0 {
return &empty.Empty{}, status.Errorf(codes.InvalidArgument, "Validator registration request is empty")
}

if err := vs.BlockBuilder.RegisterValidator(ctx, registrations); err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Could not register block builder: %v", err)
}

return &empty.Empty{}, nil
}

// ProduceAttestationData requests that the beacon node produces attestation data for
// the requested committee index and slot based on the nodes current head.
func (vs *Server) ProduceAttestationData(ctx context.Context, req *ethpbv1.ProduceAttestationDataRequest) (*ethpbv1.ProduceAttestationDataResponse, error) {
ctx, span := trace.StartSpan(ctx, "validator.ProduceAttestationData")
defer span.End()

v1alpha1req := &ethpbalpha.AttestationDataRequest{
Slot: req.Slot,
CommitteeIndex: req.CommitteeIndex,
}
v1alpha1resp, err := vs.V1Alpha1Server.GetAttestationData(ctx, v1alpha1req)
if err != nil {
// We simply return err because it's already of a gRPC error type.
return nil, err
}
attData := migration.V1Alpha1AttDataToV1(v1alpha1resp)

return &ethpbv1.ProduceAttestationDataResponse{Data: attData}, nil
}

// SubmitBeaconCommitteeSubscription searches using discv5 for peers related to the provided subnet information
// and replaces current peers with those ones if necessary.
func (vs *Server) SubmitBeaconCommitteeSubscription(ctx context.Context, req *ethpbv1.SubmitBeaconCommitteeSubscriptionsRequest) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "validator.SubmitBeaconCommitteeSubscription")
defer span.End()

if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}

if len(req.Data) == 0 {
return nil, status.Error(codes.InvalidArgument, "No subscriptions provided")
}

s, err := vs.HeadFetcher.HeadStateReadOnly(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}

// Verify validators at the beginning to return early if request is invalid.
validators := make([]state.ReadOnlyValidator, len(req.Data))
for i, sub := range req.Data {
val, err := s.ValidatorAtIndexReadOnly(sub.ValidatorIndex)
if outOfRangeErr, ok := err.(*state_native.ValidatorIndexOutOfRangeError); ok {
return nil, status.Errorf(codes.InvalidArgument, "Invalid validator ID: %v", outOfRangeErr)
}
validators[i] = val
}

fetchValsLen := func(slot primitives.Slot) (uint64, error) {
wantedEpoch := slots.ToEpoch(slot)
vals, err := vs.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
if err != nil {
return 0, err
}
return uint64(len(vals)), nil
}

// Request the head validator indices of epoch represented by the first requested slot.
currValsLen, err := fetchValsLen(req.Data[0].Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve head validator length: %v", err)
}
currEpoch := slots.ToEpoch(req.Data[0].Slot)

for _, sub := range req.Data {
// If epoch has changed, re-request active validators length
if currEpoch != slots.ToEpoch(sub.Slot) {
currValsLen, err = fetchValsLen(sub.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve head validator length: %v", err)
}
currEpoch = slots.ToEpoch(sub.Slot)
}
subnet := helpers.ComputeSubnetFromCommitteeAndSlot(currValsLen, sub.CommitteeIndex, sub.Slot)
cache.SubnetIDs.AddAttesterSubnetID(sub.Slot, subnet)
if sub.IsAggregator {
cache.SubnetIDs.AddAggregatorSubnetID(sub.Slot, subnet)
}
}

for _, val := range validators {
valStatus, err := rpchelpers.ValidatorStatus(val, currEpoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve validator status: %v", err)
}
pubkey := val.PublicKey()
v1alpha1Req := &ethpbalpha.AssignValidatorToSubnetRequest{PublicKey: pubkey[:], Status: v1ValidatorStatusToV1Alpha1(valStatus)}
_, err = vs.V1Alpha1Server.AssignValidatorToSubnet(ctx, v1alpha1Req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not assign validator to subnet")
}
}

return &emptypb.Empty{}, nil
}

// SubmitSyncCommitteeSubscription subscribe to a number of sync committee subnets.
//
// Subscribing to sync committee subnets is an action performed by VC to enable
// network participation in Altair networks, and only required if the VC has an active
// validator in an active sync committee.
func (vs *Server) SubmitSyncCommitteeSubscription(ctx context.Context, req *ethpbv2.SubmitSyncCommitteeSubscriptionsRequest) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "validator.SubmitSyncCommitteeSubscription")
defer span.End()

if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}

if len(req.Data) == 0 {
return nil, status.Error(codes.InvalidArgument, "No subscriptions provided")
}
s, err := vs.HeadFetcher.HeadStateReadOnly(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
currEpoch := slots.ToEpoch(s.Slot())
validators := make([]state.ReadOnlyValidator, len(req.Data))
for i, sub := range req.Data {
val, err := s.ValidatorAtIndexReadOnly(sub.ValidatorIndex)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get validator at index %d: %v", sub.ValidatorIndex, err)
}
valStatus, err := rpchelpers.ValidatorSubStatus(val, currEpoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get validator status at index %d: %v", sub.ValidatorIndex, err)
}
if valStatus != ethpbv1.ValidatorStatus_ACTIVE_ONGOING && valStatus != ethpbv1.ValidatorStatus_ACTIVE_EXITING {
return nil, status.Errorf(codes.InvalidArgument, "Validator at index %d is not active or exiting: %v", sub.ValidatorIndex, err)
}
validators[i] = val
}

startEpoch, err := slots.SyncCommitteePeriodStartEpoch(currEpoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get sync committee period start epoch: %v.", err)
}

for i, sub := range req.Data {
if sub.UntilEpoch <= currEpoch {
return nil, status.Errorf(codes.InvalidArgument, "Epoch for subscription at index %d is in the past. It must be at least %d", i, currEpoch+1)
}
maxValidUntilEpoch := startEpoch + params.BeaconConfig().EpochsPerSyncCommitteePeriod*2
if sub.UntilEpoch > maxValidUntilEpoch {
return nil, status.Errorf(
codes.InvalidArgument,
"Epoch for subscription at index %d is too far in the future. It can be at most %d",
i,
maxValidUntilEpoch,
)
}
}

for i, sub := range req.Data {
pubkey48 := validators[i].PublicKey()
// Handle overflow in the event current epoch is less than end epoch.
// This is an impossible condition, so it is a defensive check.
epochsToWatch, err := sub.UntilEpoch.SafeSub(uint64(startEpoch))
if err != nil {
epochsToWatch = 0
}
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second
totalDuration := epochDuration * time.Duration(epochsToWatch)

cache.SyncSubnetIDs.AddSyncCommitteeSubnets(pubkey48[:], startEpoch, sub.SyncCommitteeIndices, totalDuration)
}

return &empty.Empty{}, nil
}

// ProduceSyncCommitteeContribution requests that the beacon node produce a sync committee contribution.
func (vs *Server) ProduceSyncCommitteeContribution(
ctx context.Context,
req *ethpbv2.ProduceSyncCommitteeContributionRequest,
) (*ethpbv2.ProduceSyncCommitteeContributionResponse, error) {
ctx, span := trace.StartSpan(ctx, "validator.ProduceSyncCommitteeContribution")
defer span.End()

msgs, err := vs.SyncCommitteePool.SyncCommitteeMessages(req.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get sync subcommittee messages: %v", err)
}
if msgs == nil {
return nil, status.Errorf(codes.NotFound, "No subcommittee messages found")
}
v1alpha1Req := &ethpbalpha.AggregatedSigAndAggregationBitsRequest{
Msgs: msgs,
Slot: req.Slot,
SubnetId: req.SubcommitteeIndex,
BlockRoot: req.BeaconBlockRoot,
}
v1alpha1Resp, err := vs.V1Alpha1Server.AggregatedSigAndAggregationBits(ctx, v1alpha1Req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get contribution data: %v", err)
}
contribution := &ethpbv2.SyncCommitteeContribution{
Slot: req.Slot,
BeaconBlockRoot: req.BeaconBlockRoot,
SubcommitteeIndex: req.SubcommitteeIndex,
AggregationBits: v1alpha1Resp.Bits,
Signature: v1alpha1Resp.AggregatedSig,
}

return &ethpbv2.ProduceSyncCommitteeContributionResponse{
Data: contribution,
}, nil
}

// GetLiveness requests the beacon node to indicate if a validator has been observed to be live in a given epoch.
// The beacon node might detect liveness by observing messages from the validator on the network,
// in the beacon chain, from its API or from any other source.
@@ -1104,20 +856,6 @@ func proposalDependentRoot(s state.BeaconState, epoch primitives.Epoch) ([]byte,
return root, nil
}

// Logic based on https://hackmd.io/ofFJ5gOmQpu1jjHilHbdQQ
func v1ValidatorStatusToV1Alpha1(valStatus ethpbv1.ValidatorStatus) ethpbalpha.ValidatorStatus {
switch valStatus {
case ethpbv1.ValidatorStatus_ACTIVE:
return ethpbalpha.ValidatorStatus_ACTIVE
case ethpbv1.ValidatorStatus_PENDING:
return ethpbalpha.ValidatorStatus_PENDING
case ethpbv1.ValidatorStatus_WITHDRAWAL:
return ethpbalpha.ValidatorStatus_EXITED
default:
return ethpbalpha.ValidatorStatus_UNKNOWN_STATUS
}
}

func syncCommitteeDutiesLastValidEpoch(currentEpoch primitives.Epoch) primitives.Epoch {
currentSyncPeriodIndex := currentEpoch / params.BeaconConfig().EpochsPerSyncCommitteePeriod
// Return the last epoch of the next sync committee.

@@ -11,18 +11,15 @@ import (
mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
builderTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/builder/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
dbutil "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
p2pmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
v1alpha1validator "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
@@ -33,10 +30,11 @@ import (
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/protobuf/proto"
)

func TestGetAttesterDuties(t *testing.T) {
helpers.ClearCache()

ctx := context.Background()
genesis := util.NewBeaconBlock()
depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
@@ -196,6 +194,8 @@ func TestGetAttesterDuties(t *testing.T) {
}

func TestGetAttesterDuties_SyncNotReady(t *testing.T) {
helpers.ClearCache()

st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
@@ -210,6 +210,8 @@ func TestGetAttesterDuties_SyncNotReady(t *testing.T) {
}

func TestGetProposerDuties(t *testing.T) {
helpers.ClearCache()

ctx := context.Background()
genesis := util.NewBeaconBlock()
depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
@@ -264,10 +266,10 @@ func TestGetProposerDuties(t *testing.T) {
}
vid, _, has := vs.ProposerSlotIndexCache.GetProposerPayloadIDs(11, [32]byte{})
require.Equal(t, true, has)
require.Equal(t, primitives.ValidatorIndex(9982), vid)
require.Equal(t, primitives.ValidatorIndex(12289), vid)
require.NotNil(t, expectedDuty, "Expected duty for slot 11 not found")
assert.Equal(t, primitives.ValidatorIndex(9982), expectedDuty.ValidatorIndex)
assert.DeepEqual(t, pubKeys[9982], expectedDuty.Pubkey)
assert.Equal(t, primitives.ValidatorIndex(12289), expectedDuty.ValidatorIndex)
assert.DeepEqual(t, pubKeys[12289], expectedDuty.Pubkey)
})

t.Run("Next epoch", func(t *testing.T) {
@@ -303,10 +305,10 @@ func TestGetProposerDuties(t *testing.T) {
}
vid, _, has := vs.ProposerSlotIndexCache.GetProposerPayloadIDs(43, [32]byte{})
require.Equal(t, true, has)
require.Equal(t, primitives.ValidatorIndex(4863), vid)
require.Equal(t, primitives.ValidatorIndex(1360), vid)
require.NotNil(t, expectedDuty, "Expected duty for slot 43 not found")
assert.Equal(t, primitives.ValidatorIndex(4863), expectedDuty.ValidatorIndex)
assert.DeepEqual(t, pubKeys[4863], expectedDuty.Pubkey)
assert.Equal(t, primitives.ValidatorIndex(1360), expectedDuty.ValidatorIndex)
assert.DeepEqual(t, pubKeys[1360], expectedDuty.Pubkey)
})

t.Run("Prune payload ID cache ok", func(t *testing.T) {
@@ -345,7 +347,7 @@ func TestGetProposerDuties(t *testing.T) {
require.Equal(t, primitives.ValidatorIndex(0), vid)
vid, _, has = vs.ProposerSlotIndexCache.GetProposerPayloadIDs(32, [32]byte{})
require.Equal(t, true, has)
require.Equal(t, primitives.ValidatorIndex(4309), vid)
require.Equal(t, primitives.ValidatorIndex(10565), vid)
})

t.Run("Epoch out of bound", func(t *testing.T) {
@@ -413,6 +415,8 @@ func TestGetProposerDuties(t *testing.T) {
}

func TestGetProposerDuties_SyncNotReady(t *testing.T) {
helpers.ClearCache()

st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
@@ -427,6 +431,8 @@ func TestGetProposerDuties_SyncNotReady(t *testing.T) {
}

func TestGetSyncCommitteeDuties(t *testing.T) {
helpers.ClearCache()

ctx := context.Background()
genesisTime := time.Now()
numVals := uint64(11)
@@ -666,6 +672,8 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
}

func TestGetSyncCommitteeDuties_SyncNotReady(t *testing.T) {
helpers.ClearCache()

st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
@@ -680,6 +688,8 @@ func TestGetSyncCommitteeDuties_SyncNotReady(t *testing.T) {
}

func TestSyncCommitteeDutiesLastValidEpoch(t *testing.T) {
helpers.ClearCache()

t.Run("first epoch of current period", func(t *testing.T) {
assert.Equal(t, params.BeaconConfig().EpochsPerSyncCommitteePeriod*2-1, syncCommitteeDutiesLastValidEpoch(0))
})
@@ -1227,445 +1237,6 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
})
}

func TestProduceAttestationData(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Slot = 3*params.BeaconConfig().SlotsPerEpoch + 1
targetBlock := util.NewBeaconBlock()
targetBlock.Block.Slot = 1 * params.BeaconConfig().SlotsPerEpoch
justifiedBlock := util.NewBeaconBlock()
justifiedBlock.Block.Slot = 2 * params.BeaconConfig().SlotsPerEpoch
blockRoot, err := block.Block.HashTreeRoot()
require.NoError(t, err, "Could not hash beacon block")
justifiedRoot, err := justifiedBlock.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root for justified block")
targetRoot, err := targetBlock.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root for target block")
slot := 3*params.BeaconConfig().SlotsPerEpoch + 1
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(slot))
err = beaconState.SetCurrentJustifiedCheckpoint(&ethpbalpha.Checkpoint{
Epoch: 2,
Root: justifiedRoot[:],
})
require.NoError(t, err)

blockRoots := beaconState.BlockRoots()
blockRoots[1] = blockRoot[:]
blockRoots[1*params.BeaconConfig().SlotsPerEpoch] = targetRoot[:]
blockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedRoot[:]
require.NoError(t, beaconState.SetBlockRoots(blockRoots))
chainService := &mockChain.ChainService{
Genesis: time.Now(),
}
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
v1Alpha1Server := &v1alpha1validator.Server{
P2P: &p2pmock.MockBroadcaster{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mockChain.ChainService{
State: beaconState, Root: blockRoot[:],
},
FinalizationFetcher: &mockChain.ChainService{
CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(),
},
TimeFetcher: &mockChain.ChainService{
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
},
StateNotifier: chainService.StateNotifier(),
}
v1Server := &Server{
V1Alpha1Server: v1Alpha1Server,
}

req := &ethpbv1.ProduceAttestationDataRequest{
CommitteeIndex: 0,
Slot: 3*params.BeaconConfig().SlotsPerEpoch + 1,
}
res, err := v1Server.ProduceAttestationData(context.Background(), req)
require.NoError(t, err, "Could not get attestation info at slot")

expectedInfo := &ethpbv1.AttestationData{
Slot: 3*params.BeaconConfig().SlotsPerEpoch + 1,
BeaconBlockRoot: blockRoot[:],
Source: &ethpbv1.Checkpoint{
Epoch: 2,
Root: justifiedRoot[:],
},
Target: &ethpbv1.Checkpoint{
Epoch: 3,
Root: blockRoot[:],
},
}

if !proto.Equal(res.Data, expectedInfo) {
t.Errorf("Expected attestation info to match, received %v, wanted %v", res, expectedInfo)
}
}

func TestSubmitBeaconCommitteeSubscription(t *testing.T) {
ctx := context.Background()
genesis := util.NewBeaconBlock()
depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
deposits, _, err := util.DeterministicDepositsAndKeys(depChainStart)
require.NoError(t, err)
eth1Data, err := util.DeterministicEth1Data(len(deposits))
require.NoError(t, err)
bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data)
require.NoError(t, err, "Could not set up genesis state")
// Set state to non-epoch start slot.
require.NoError(t, bs.SetSlot(5))
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
roots := make([][]byte, fieldparams.BlockRootsLength)
roots[0] = genesisRoot[:]
require.NoError(t, bs.SetBlockRoots(roots))

pubKeys := make([][]byte, len(deposits))
for i := 0; i < len(deposits); i++ {
pubKeys[i] = deposits[i].Data.PublicKey
}

chainSlot := primitives.Slot(0)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
V1Alpha1Server: &v1alpha1validator.Server{},
}

t.Run("Single subscription", func(t *testing.T) {
cache.SubnetIDs.EmptyAllCaches()
req := &ethpbv1.SubmitBeaconCommitteeSubscriptionsRequest{
Data: []*ethpbv1.BeaconCommitteeSubscribe{
{
ValidatorIndex: 1,
CommitteeIndex: 1,
Slot: 1,
IsAggregator: false,
},
},
}
_, err = vs.SubmitBeaconCommitteeSubscription(ctx, req)
require.NoError(t, err)
subnets := cache.SubnetIDs.GetAttesterSubnetIDs(1)
require.Equal(t, 1, len(subnets))
assert.Equal(t, uint64(4), subnets[0])
})

t.Run("Multiple subscriptions", func(t *testing.T) {
cache.SubnetIDs.EmptyAllCaches()
req := &ethpbv1.SubmitBeaconCommitteeSubscriptionsRequest{
Data: []*ethpbv1.BeaconCommitteeSubscribe{
{
ValidatorIndex: 1,
CommitteeIndex: 1,
Slot: 1,
IsAggregator: false,
},
{
ValidatorIndex: 1000,
CommitteeIndex: 16,
Slot: 1,
IsAggregator: false,
},
},
}
_, err = vs.SubmitBeaconCommitteeSubscription(ctx, req)
require.NoError(t, err)
subnets := cache.SubnetIDs.GetAttesterSubnetIDs(1)
require.Equal(t, 2, len(subnets))
})

t.Run("Is aggregator", func(t *testing.T) {
cache.SubnetIDs.EmptyAllCaches()
req := &ethpbv1.SubmitBeaconCommitteeSubscriptionsRequest{
Data: []*ethpbv1.BeaconCommitteeSubscribe{
{
ValidatorIndex: 1,
CommitteeIndex: 1,
Slot: 1,
IsAggregator: true,
},
},
}
_, err = vs.SubmitBeaconCommitteeSubscription(ctx, req)
require.NoError(t, err)
ids := cache.SubnetIDs.GetAggregatorSubnetIDs(primitives.Slot(1))
assert.Equal(t, 1, len(ids))
})

t.Run("Validators assigned to subnet", func(t *testing.T) {
cache.SubnetIDs.EmptyAllCaches()
req := &ethpbv1.SubmitBeaconCommitteeSubscriptionsRequest{
Data: []*ethpbv1.BeaconCommitteeSubscribe{
{
ValidatorIndex: 1,
CommitteeIndex: 1,
Slot: 1,
IsAggregator: true,
},
{
ValidatorIndex: 2,
CommitteeIndex: 1,
Slot: 1,
IsAggregator: false,
},
},
}
_, err = vs.SubmitBeaconCommitteeSubscription(ctx, req)
require.NoError(t, err)
ids, ok, _ := cache.SubnetIDs.GetPersistentSubnets(pubKeys[1])
require.Equal(t, true, ok, "subnet for validator 1 not found")
assert.Equal(t, 1, len(ids))
ids, ok, _ = cache.SubnetIDs.GetPersistentSubnets(pubKeys[2])
require.Equal(t, true, ok, "subnet for validator 2 not found")
assert.Equal(t, 1, len(ids))
})

t.Run("No subscriptions", func(t *testing.T) {
req := &ethpbv1.SubmitBeaconCommitteeSubscriptionsRequest{
Data: make([]*ethpbv1.BeaconCommitteeSubscribe, 0),
}
_, err = vs.SubmitBeaconCommitteeSubscription(ctx, req)
require.NotNil(t, err)
assert.ErrorContains(t, "No subscriptions provided", err)
})
}

func TestSubmitBeaconCommitteeSubscription_SyncNotReady(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err = vs.SubmitBeaconCommitteeSubscription(context.Background(), &ethpbv1.SubmitBeaconCommitteeSubscriptionsRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}

func TestSubmitSyncCommitteeSubscription(t *testing.T) {
ctx := context.Background()
genesis := util.NewBeaconBlock()
deposits, _, err := util.DeterministicDepositsAndKeys(64)
require.NoError(t, err)
eth1Data, err := util.DeterministicEth1Data(len(deposits))
require.NoError(t, err)
bs, err := util.GenesisBeaconState(context.Background(), deposits, 0, eth1Data)
require.NoError(t, err, "Could not set up genesis state")
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
roots := make([][]byte, fieldparams.BlockRootsLength)
roots[0] = genesisRoot[:]
require.NoError(t, bs.SetBlockRoots(roots))

pubkeys := make([][]byte, len(deposits))
for i := 0; i < len(deposits); i++ {
pubkeys[i] = deposits[i].Data.PublicKey
}

chainSlot := primitives.Slot(0)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
V1Alpha1Server: &v1alpha1validator.Server{},
}

t.Run("Single subscription", func(t *testing.T) {
cache.SyncSubnetIDs.EmptyAllCaches()
req := &ethpbv2.SubmitSyncCommitteeSubscriptionsRequest{
Data: []*ethpbv2.SyncCommitteeSubscription{
{
ValidatorIndex: 0,
SyncCommitteeIndices: []uint64{0, 2},
UntilEpoch: 1,
},
},
}
_, err = vs.SubmitSyncCommitteeSubscription(ctx, req)
require.NoError(t, err)
subnets, _, _, _ := cache.SyncSubnetIDs.GetSyncCommitteeSubnets(pubkeys[0], 0)
require.Equal(t, 2, len(subnets))
assert.Equal(t, uint64(0), subnets[0])
assert.Equal(t, uint64(2), subnets[1])
})

t.Run("Multiple subscriptions", func(t *testing.T) {
cache.SyncSubnetIDs.EmptyAllCaches()
req := &ethpbv2.SubmitSyncCommitteeSubscriptionsRequest{
Data: []*ethpbv2.SyncCommitteeSubscription{
{
ValidatorIndex: 0,
SyncCommitteeIndices: []uint64{0},
UntilEpoch: 1,
},
{
ValidatorIndex: 1,
SyncCommitteeIndices: []uint64{2},
UntilEpoch: 1,
},
},
}
_, err = vs.SubmitSyncCommitteeSubscription(ctx, req)
require.NoError(t, err)
subnets, _, _, _ := cache.SyncSubnetIDs.GetSyncCommitteeSubnets(pubkeys[0], 0)
require.Equal(t, 1, len(subnets))
assert.Equal(t, uint64(0), subnets[0])
subnets, _, _, _ = cache.SyncSubnetIDs.GetSyncCommitteeSubnets(pubkeys[1], 0)
require.Equal(t, 1, len(subnets))
assert.Equal(t, uint64(2), subnets[0])
})

t.Run("No subscriptions", func(t *testing.T) {
req := &ethpbv2.SubmitSyncCommitteeSubscriptionsRequest{
Data: make([]*ethpbv2.SyncCommitteeSubscription, 0),
}
_, err = vs.SubmitSyncCommitteeSubscription(ctx, req)
require.NotNil(t, err)
assert.ErrorContains(t, "No subscriptions provided", err)
})

t.Run("Invalid validator index", func(t *testing.T) {
req := &ethpbv2.SubmitSyncCommitteeSubscriptionsRequest{
Data: []*ethpbv2.SyncCommitteeSubscription{
{
ValidatorIndex: 99,
SyncCommitteeIndices: []uint64{},
UntilEpoch: 1,
},
},
}
_, err = vs.SubmitSyncCommitteeSubscription(ctx, req)
require.NotNil(t, err)
assert.ErrorContains(t, "Could not get validator at index 99", err)
})

t.Run("Epoch in the past", func(t *testing.T) {
req := &ethpbv2.SubmitSyncCommitteeSubscriptionsRequest{
Data: []*ethpbv2.SyncCommitteeSubscription{
{
ValidatorIndex: 0,
SyncCommitteeIndices: []uint64{},
UntilEpoch: 0,
},
},
}
_, err = vs.SubmitSyncCommitteeSubscription(ctx, req)
require.NotNil(t, err)
assert.ErrorContains(t, "Epoch for subscription at index 0 is in the past", err)
})

t.Run("First epoch after the next sync committee is valid", func(t *testing.T) {
req := &ethpbv2.SubmitSyncCommitteeSubscriptionsRequest{
Data: []*ethpbv2.SyncCommitteeSubscription{
{
ValidatorIndex: 0,
SyncCommitteeIndices: []uint64{},
UntilEpoch: 2 * params.BeaconConfig().EpochsPerSyncCommitteePeriod,
},
},
}
_, err = vs.SubmitSyncCommitteeSubscription(ctx, req)
require.NoError(t, err)
})

t.Run("Epoch too far in the future", func(t *testing.T) {
req := &ethpbv2.SubmitSyncCommitteeSubscriptionsRequest{
Data: []*ethpbv2.SyncCommitteeSubscription{
{
ValidatorIndex: 0,
SyncCommitteeIndices: []uint64{},
UntilEpoch: 2*params.BeaconConfig().EpochsPerSyncCommitteePeriod + 1,
},
},
}
_, err = vs.SubmitSyncCommitteeSubscription(ctx, req)
require.NotNil(t, err)
assert.ErrorContains(t, "Epoch for subscription at index 0 is too far in the future", err)
})
}

func TestSubmitSyncCommitteeSubscription_SyncNotReady(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err = vs.SubmitSyncCommitteeSubscription(context.Background(), &ethpbv2.SubmitSyncCommitteeSubscriptionsRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}

func TestProduceSyncCommitteeContribution(t *testing.T) {
ctx := context.Background()
root := bytesutil.PadTo([]byte("root"), 32)
sig := bls.NewAggregateSignature().Marshal()
messsage := &ethpbalpha.SyncCommitteeMessage{
Slot: 0,
BlockRoot: root,
ValidatorIndex: 0,
Signature: sig,
}
syncCommitteePool := synccommittee.NewStore()
require.NoError(t, syncCommitteePool.SaveSyncCommitteeMessage(messsage))
v1Server := &v1alpha1validator.Server{
SyncCommitteePool: syncCommitteePool,
HeadFetcher: &mockChain.ChainService{
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
},
}
server := Server{
V1Alpha1Server: v1Server,
SyncCommitteePool: syncCommitteePool,
}

req := &ethpbv2.ProduceSyncCommitteeContributionRequest{
Slot: 0,
SubcommitteeIndex: 0,
BeaconBlockRoot: root,
}
resp, err := server.ProduceSyncCommitteeContribution(ctx, req)
require.NoError(t, err)
assert.Equal(t, primitives.Slot(0), resp.Data.Slot)
assert.Equal(t, uint64(0), resp.Data.SubcommitteeIndex)
assert.DeepEqual(t, root, resp.Data.BeaconBlockRoot)
aggregationBits := resp.Data.AggregationBits
assert.Equal(t, true, aggregationBits.BitAt(0))
assert.DeepEqual(t, sig, resp.Data.Signature)

syncCommitteePool = synccommittee.NewStore()
v1Server = &v1alpha1validator.Server{
SyncCommitteePool: syncCommitteePool,
HeadFetcher: &mockChain.ChainService{
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
},
}
server = Server{
V1Alpha1Server: v1Server,
SyncCommitteePool: syncCommitteePool,
}
req = &ethpbv2.ProduceSyncCommitteeContributionRequest{
Slot: 0,
SubcommitteeIndex: 0,
BeaconBlockRoot: root,
}
_, err = server.ProduceSyncCommitteeContribution(ctx, req)
assert.ErrorContains(t, "No subcommittee messages found", err)
}

func TestPrepareBeaconProposer(t *testing.T) {
type args struct {
request *ethpbv1.PrepareBeaconProposerRequest
@@ -1807,64 +1378,6 @@ func BenchmarkServer_PrepareBeaconProposer(b *testing.B) {
}
}

func TestServer_SubmitValidatorRegistrations(t *testing.T) {
type args struct {
request *ethpbv1.SubmitValidatorRegistrationsRequest
}
tests := []struct {
name string
args args
wantErr string
}{
{
name: "Happy Path",
args: args{
request: &ethpbv1.SubmitValidatorRegistrationsRequest{
Registrations: []*ethpbv1.SubmitValidatorRegistrationsRequest_SignedValidatorRegistration{
{
Message: &ethpbv1.SubmitValidatorRegistrationsRequest_ValidatorRegistration{
FeeRecipient: make([]byte, fieldparams.BLSPubkeyLength),
GasLimit: 30000000,
Timestamp: uint64(time.Now().Unix()),
Pubkey: make([]byte, fieldparams.BLSPubkeyLength),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
},
},
},
wantErr: "",
},
{
name: "Empty Request",
args: args{
request: &ethpbv1.SubmitValidatorRegistrationsRequest{
Registrations: []*ethpbv1.SubmitValidatorRegistrationsRequest_SignedValidatorRegistration{},
},
},
wantErr: "Validator registration request is empty",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := dbutil.SetupDB(t)
ctx := context.Background()
server := &Server{
BlockBuilder: &builderTest.MockBuilderService{
HasConfigured: true,
},
BeaconDB: db,
}
_, err := server.SubmitValidatorRegistration(ctx, tt.args.request)
if tt.wantErr != "" {
require.ErrorContains(t, tt.wantErr, err)
return
}
require.NoError(t, err)
})
}
}

func TestGetLiveness(t *testing.T) {
ctx := context.Background()

@@ -103,6 +103,7 @@ go_test(
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -47,4 +48,5 @@ type Server struct {
SyncChecker sync.Checker
ReplayerBuilder stategen.ReplayerBuilder
OptimisticModeFetcher blockchain.OptimisticModeFetcher
CoreService *core.Service
}

@@ -659,11 +659,7 @@ func (bs *Server) GetValidatorQueue(
func (bs *Server) GetValidatorPerformance(
ctx context.Context, req *ethpb.ValidatorPerformanceRequest,
) (*ethpb.ValidatorPerformanceResponse, error) {
if bs.SyncChecker.Syncing() {
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
currSlot := bs.GenesisTimeFetcher.CurrentSlot()
response, err := core.ComputeValidatorPerformance(ctx, req, bs.HeadFetcher, currSlot)
response, err := bs.CoreService.ComputeValidatorPerformance(ctx, req)
if err != nil {
return nil, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "Could not compute validator performance: %v", err.Err)
}

@@ -19,6 +19,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
@@ -1797,7 +1798,9 @@ func TestGetValidatorPerformance_Syncing(t *testing.T) {
ctx := context.Background()

bs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
CoreService: &core.Service{
SyncChecker: &mockSync.Sync{IsSyncing: true},
},
}

wanted := "Syncing to latest head, not ready to respond"
@@ -1857,11 +1860,13 @@ func TestGetValidatorPerformance_OK(t *testing.T) {
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
@@ -1918,12 +1923,14 @@ func TestGetValidatorPerformance_Indices(t *testing.T) {
require.NoError(t, headState.SetValidators(validators))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
}
c := headState.Copy()
vp, bp, err := precompute.New(ctx, c)
@@ -1988,12 +1995,14 @@ func TestGetValidatorPerformance_IndicesPubkeys(t *testing.T) {

offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
}
c := headState.Copy()
vp, bp, err := precompute.New(ctx, c)
@@ -2064,11 +2073,13 @@ func TestGetValidatorPerformanceAltair_OK(t *testing.T) {
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
@@ -2132,11 +2143,13 @@ func TestGetValidatorPerformanceBellatrix_OK(t *testing.T) {
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
|
||||
State: headState,
|
||||
CoreService: &core.Service{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: headState,
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
want := ðpb.ValidatorPerformanceResponse{
|
||||
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
|
||||
@@ -2200,11 +2213,13 @@ func TestGetValidatorPerformanceCapella_OK(t *testing.T) {
|
||||
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
|
||||
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
bs := &Server{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: headState,
|
||||
CoreService: &core.Service{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: headState,
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
want := ðpb.ValidatorPerformanceResponse{
|
||||
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
|
||||
|
||||
@@ -113,7 +113,7 @@ func (vs *Server) SubmitSignedAggregateSelectionProof(
|
||||
ctx context.Context,
|
||||
req *ethpb.SignedAggregateSubmitRequest,
|
||||
) (*ethpb.SignedAggregateSubmitResponse, error) {
|
||||
if err := core.SubmitSignedAggregateSelectionProof(ctx, req, vs.TimeFetcher.GenesisTime(), vs.P2P); err != nil {
|
||||
if err := vs.CoreService.SubmitSignedAggregateSelectionProof(ctx, req); err != nil {
|
||||
return nil, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "Could not submit aggregate: %v", err.Err)
|
||||
}
|
||||
return ðpb.SignedAggregateSubmitResponse{}, nil
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
|
||||
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
@@ -431,7 +432,11 @@ func TestSubmitSignedAggregateSelectionProof_ZeroHashesSignatures(t *testing.T)
|
||||
|
||||
func TestSubmitSignedAggregateSelectionProof_InvalidSlot(t *testing.T) {
|
||||
c := &mock.ChainService{Genesis: time.Now()}
|
||||
aggregatorServer := &Server{TimeFetcher: c}
|
||||
aggregatorServer := &Server{
|
||||
CoreService: &core.Service{
|
||||
GenesisTimeFetcher: c,
|
||||
},
|
||||
}
|
||||
req := ðpb.SignedAggregateSubmitRequest{
|
||||
SignedAggregateAndProof: ðpb.SignedAggregateAttestationAndProof{
|
||||
Signature: []byte{'a'},
|
||||
|
||||
@@ -11,10 +11,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
beaconState "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/rand"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
@@ -234,8 +234,8 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
|
||||
validatorAssignments = append(validatorAssignments, assignment)
|
||||
nextValidatorAssignments = append(nextValidatorAssignments, nextAssignment)
|
||||
// Assign relevant validator to subnet.
|
||||
assignValidatorToSubnet(pubKey, assignment.Status)
|
||||
assignValidatorToSubnet(pubKey, nextAssignment.Status)
|
||||
core.AssignValidatorToSubnetProto(pubKey, assignment.Status)
|
||||
core.AssignValidatorToSubnetProto(pubKey, nextAssignment.Status)
|
||||
}
|
||||
|
||||
return ðpb.DutiesResponse{
|
||||
@@ -248,34 +248,10 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
|
||||
// AssignValidatorToSubnet checks the status and pubkey of a particular validator
|
||||
// to discern whether persistent subnets need to be registered for them.
|
||||
func (vs *Server) AssignValidatorToSubnet(_ context.Context, req *ethpb.AssignValidatorToSubnetRequest) (*emptypb.Empty, error) {
|
||||
assignValidatorToSubnet(req.PublicKey, req.Status)
|
||||
core.AssignValidatorToSubnetProto(req.PublicKey, req.Status)
|
||||
return &emptypb.Empty{}, nil
|
||||
}
|
||||
|
||||
func assignValidatorToSubnet(pubkey []byte, status ethpb.ValidatorStatus) {
|
||||
if status != ethpb.ValidatorStatus_ACTIVE && status != ethpb.ValidatorStatus_EXITING {
|
||||
return
|
||||
}
|
||||
|
||||
_, ok, expTime := cache.SubnetIDs.GetPersistentSubnets(pubkey)
|
||||
if ok && expTime.After(prysmTime.Now()) {
|
||||
return
|
||||
}
|
||||
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
var assignedIdxs []uint64
|
||||
randGen := rand.NewGenerator()
|
||||
for i := uint64(0); i < params.BeaconConfig().RandomSubnetsPerValidator; i++ {
|
||||
assignedIdx := randGen.Intn(int(params.BeaconNetworkConfig().AttestationSubnetCount))
|
||||
assignedIdxs = append(assignedIdxs, uint64(assignedIdx))
|
||||
}
|
||||
|
||||
assignedDuration := uint64(randGen.Intn(int(params.BeaconConfig().EpochsPerRandomSubnetSubscription)))
|
||||
assignedDuration += params.BeaconConfig().EpochsPerRandomSubnetSubscription
|
||||
|
||||
totalDuration := epochDuration * time.Duration(assignedDuration)
|
||||
cache.SubnetIDs.AddPersistentCommittee(pubkey, assignedIdxs, totalDuration*time.Second)
|
||||
}
|
||||
|
||||
func registerSyncSubnetCurrentPeriod(s beaconState.BeaconState, epoch primitives.Epoch, pubKey []byte, status ethpb.ValidatorStatus) error {
|
||||
committee, err := s.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
@@ -606,7 +607,7 @@ func TestStreamDuties_OK_ChainReorg(t *testing.T) {
|
||||
func TestAssignValidatorToSubnet(t *testing.T) {
|
||||
k := pubKey(3)
|
||||
|
||||
assignValidatorToSubnet(k, ethpb.ValidatorStatus_ACTIVE)
|
||||
core.AssignValidatorToSubnetProto(k, ethpb.ValidatorStatus_ACTIVE)
|
||||
coms, ok, exp := cache.SubnetIDs.GetPersistentSubnets(k)
|
||||
require.Equal(t, true, ok, "No cache entry found for validator")
|
||||
assert.Equal(t, params.BeaconConfig().RandomSubnetsPerValidator, uint64(len(coms)))
|
||||
|
||||
@@ -2,19 +2,14 @@ package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -42,102 +37,9 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := helpers.ValidateAttestationTime(req.Slot, vs.TimeFetcher.GenesisTime(),
|
||||
params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
|
||||
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("invalid request: %v", err))
|
||||
}
|
||||
|
||||
res, err := vs.AttestationCache.Get(ctx, req)
|
||||
res, err := vs.CoreService.GetAttestationData(ctx, req)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve data from attestation cache: %v", err)
|
||||
}
|
||||
if res != nil {
|
||||
res.CommitteeIndex = req.CommitteeIndex
|
||||
return res, nil
|
||||
}
|
||||
|
||||
if err := vs.AttestationCache.MarkInProgress(req); err != nil {
|
||||
if errors.Is(err, cache.ErrAlreadyInProgress) {
|
||||
res, err := vs.AttestationCache.Get(ctx, req)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve data from attestation cache: %v", err)
|
||||
}
|
||||
if res == nil {
|
||||
return nil, status.Error(codes.DataLoss, "A request was in progress and resolved to nil")
|
||||
}
|
||||
res.CommitteeIndex = req.CommitteeIndex
|
||||
return res, nil
|
||||
}
|
||||
return nil, status.Errorf(codes.Internal, "Could not mark attestation as in-progress: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := vs.AttestationCache.MarkNotInProgress(req); err != nil {
|
||||
log.WithError(err).Error("Could not mark cache not in progress")
|
||||
}
|
||||
}()
|
||||
|
||||
headState, err := vs.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve head state: %v", err)
|
||||
}
|
||||
headRoot, err := vs.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve head root: %v", err)
|
||||
}
|
||||
|
||||
// In the case that we receive an attestation request after a newer state/block has been processed.
|
||||
if headState.Slot() > req.Slot {
|
||||
headRoot, err = helpers.BlockRootAtSlot(headState, req.Slot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get historical head root: %v", err)
|
||||
}
|
||||
headState, err = vs.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(headRoot))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get historical head state: %v", err)
|
||||
}
|
||||
}
|
||||
if headState == nil || headState.IsNil() {
|
||||
return nil, status.Error(codes.Internal, "Could not lookup parent state from head.")
|
||||
}
|
||||
|
||||
if time.CurrentEpoch(headState) < slots.ToEpoch(req.Slot) {
|
||||
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, req.Slot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", req.Slot, err)
|
||||
}
|
||||
}
|
||||
|
||||
targetEpoch := time.CurrentEpoch(headState)
|
||||
epochStartSlot, err := slots.EpochStart(targetEpoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var targetRoot []byte
|
||||
if epochStartSlot == headState.Slot() {
|
||||
targetRoot = headRoot
|
||||
} else {
|
||||
targetRoot, err = helpers.BlockRootAtSlot(headState, epochStartSlot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get target block for slot %d: %v", epochStartSlot, err)
|
||||
}
|
||||
if bytesutil.ToBytes32(targetRoot) == params.BeaconConfig().ZeroHash {
|
||||
targetRoot = headRoot
|
||||
}
|
||||
}
|
||||
|
||||
res = ðpb.AttestationData{
|
||||
Slot: req.Slot,
|
||||
CommitteeIndex: req.CommitteeIndex,
|
||||
BeaconBlockRoot: headRoot,
|
||||
Source: headState.CurrentJustifiedCheckpoint(),
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: targetEpoch,
|
||||
Root: targetRoot,
|
||||
},
|
||||
}
|
||||
|
||||
if err := vs.AttestationCache.Put(ctx, req, res); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not store attestation data in cache: %v", err)
|
||||
return nil, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "Could not get attestation data: %v", err.Err)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -65,20 +65,16 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) {
|
||||
blockRoots[1*params.BeaconConfig().SlotsPerEpoch] = epochBoundaryRoot[:]
|
||||
blockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedBlockRoot[:]
|
||||
require.NoError(t, beaconState.SetBlockRoots(blockRoots))
|
||||
chainService := &mock.ChainService{
|
||||
Genesis: time.Now(),
|
||||
}
|
||||
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
attesterServer := &Server{
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
HeadFetcher: &mock.ChainService{State: beaconState, Root: blockRoot[:]},
|
||||
FinalizationFetcher: &mock.ChainService{
|
||||
CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(),
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
CoreService: &core.Service{
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
HeadFetcher: &mock.ChainService{State: beaconState, Root: blockRoot[:]},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
StateNotifier: chainService.StateNotifier(),
|
||||
}
|
||||
|
||||
req := ðpb.AttestationDataRequest{
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
|
||||
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
@@ -34,7 +35,6 @@ func TestProposeAttestation_OK(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
AttPool: attestations.NewPool(),
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
}
|
||||
@@ -78,7 +78,6 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
AttPool: attestations.NewPool(),
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
}
|
||||
@@ -117,24 +116,20 @@ func TestGetAttestationData_OK(t *testing.T) {
|
||||
blockRoots[1*params.BeaconConfig().SlotsPerEpoch] = targetRoot[:]
|
||||
blockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedRoot[:]
|
||||
require.NoError(t, beaconState.SetBlockRoots(blockRoots))
|
||||
chainService := &mock.ChainService{
|
||||
Genesis: time.Now(),
|
||||
}
|
||||
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
attesterServer := &Server{
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: beaconState, Root: blockRoot[:],
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
CoreService: &core.Service{
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: beaconState, Root: blockRoot[:],
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{
|
||||
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
|
||||
},
|
||||
},
|
||||
FinalizationFetcher: &mock.ChainService{
|
||||
CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(),
|
||||
},
|
||||
TimeFetcher: &mock.ChainService{
|
||||
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
|
||||
},
|
||||
StateNotifier: chainService.StateNotifier(),
|
||||
}
|
||||
|
||||
req := ðpb.AttestationDataRequest{
|
||||
@@ -163,7 +158,7 @@ func TestGetAttestationData_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetAttestationData_SyncNotReady(t *testing.T) {
|
||||
as := &Server{
|
||||
as := Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
_, err := as.GetAttestationData(context.Background(), ðpb.AttestationDataRequest{})
|
||||
@@ -178,9 +173,12 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
|
||||
|
||||
as := &Server{
|
||||
SyncChecker: &mockSync.Sync{},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
CoreService: &core.Service{
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
},
|
||||
}
|
||||
_, err := as.GetAttestationData(context.Background(), ðpb.AttestationDataRequest{})
|
||||
s, ok := status.FromError(err)
|
||||
@@ -192,10 +190,13 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
as = &Server{
|
||||
SyncChecker: &mockSync.Sync{},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
CoreService: &core.Service{
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
},
|
||||
}
|
||||
_, err = as.GetAttestationData(context.Background(), ðpb.AttestationDataRequest{})
|
||||
require.NoError(t, err)
|
||||
@@ -206,17 +207,17 @@ func TestAttestationDataSlot_handlesInProgressRequest(t *testing.T) {
|
||||
state, err := state_native.InitializeFromProtoPhase0(s)
|
||||
require.NoError(t, err)
|
||||
ctx := context.Background()
|
||||
chainService := &mock.ChainService{
|
||||
Genesis: time.Now(),
|
||||
}
|
||||
slot := primitives.Slot(2)
|
||||
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
server := &Server{
|
||||
HeadFetcher: &mock.ChainService{State: state},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
StateNotifier: chainService.StateNotifier(),
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
CoreService: &core.Service{
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
HeadFetcher: &mock.ChainService{State: state},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
},
|
||||
}
|
||||
|
||||
req := ðpb.AttestationDataRequest{
|
||||
@@ -229,7 +230,7 @@ func TestAttestationDataSlot_handlesInProgressRequest(t *testing.T) {
|
||||
Target: ðpb.Checkpoint{Epoch: 55, Root: make([]byte, 32)},
|
||||
}
|
||||
|
||||
require.NoError(t, server.AttestationCache.MarkInProgress(req))
|
||||
require.NoError(t, server.CoreService.AttestationCache.MarkInProgress(req))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
@@ -247,8 +248,8 @@ func TestAttestationDataSlot_handlesInProgressRequest(t *testing.T) {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
assert.NoError(t, server.AttestationCache.Put(ctx, req, res))
|
||||
assert.NoError(t, server.AttestationCache.MarkNotInProgress(req))
|
||||
assert.NoError(t, server.CoreService.AttestationCache.Put(ctx, req, res))
|
||||
assert.NoError(t, server.CoreService.AttestationCache.MarkNotInProgress(req))
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
@@ -260,9 +261,12 @@ func TestServer_GetAttestationData_InvalidRequestSlot(t *testing.T) {
|
||||
slot := 3*params.BeaconConfig().SlotsPerEpoch + 1
|
||||
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
attesterServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
CoreService: &core.Service{
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
},
|
||||
}
|
||||
|
||||
req := ðpb.AttestationDataRequest{
|
||||
@@ -323,19 +327,17 @@ func TestServer_GetAttestationData_HeadStateSlotGreaterThanRequestSlot(t *testin
|
||||
beaconstate := beaconState.Copy()
|
||||
require.NoError(t, beaconstate.SetSlot(beaconstate.Slot()-1))
|
||||
require.NoError(t, db.SaveState(ctx, beaconstate, blockRoot2))
|
||||
chainService := &mock.ChainService{
|
||||
Genesis: time.Now(),
|
||||
}
|
||||
offset = int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
attesterServer := &Server{
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
HeadFetcher: &mock.ChainService{State: beaconState, Root: blockRoot[:]},
|
||||
FinalizationFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint()},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
StateNotifier: chainService.StateNotifier(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
CoreService: &core.Service{
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
HeadFetcher: &mock.ChainService{State: beaconState, Root: blockRoot[:]},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
},
|
||||
}
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, blockRoot))
|
||||
util.SaveBlock(t, ctx, db, block)
|
||||
@@ -394,22 +396,18 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) {
|
||||
blockRoots[1*params.BeaconConfig().SlotsPerEpoch] = targetRoot[:]
|
||||
blockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedRoot[:]
|
||||
require.NoError(t, beaconState.SetBlockRoots(blockRoots))
|
||||
chainService := &mock.ChainService{
|
||||
Genesis: time.Now(),
|
||||
}
|
||||
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
attesterServer := &Server{
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: beaconState, Root: blockRoot[:],
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
TimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
CoreService: &core.Service{
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: beaconState, Root: blockRoot[:],
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
},
|
||||
FinalizationFetcher: &mock.ChainService{
|
||||
CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(),
|
||||
},
|
||||
TimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
StateNotifier: chainService.StateNotifier(),
|
||||
}
|
||||
|
||||
req := ðpb.AttestationDataRequest{
|
||||
@@ -441,7 +439,6 @@ func TestServer_SubscribeCommitteeSubnets_NoSlots(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
AttPool: attestations.NewPool(),
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
}
|
||||
@@ -462,7 +459,6 @@ func TestServer_SubscribeCommitteeSubnets_DifferentLengthSlots(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
AttPool: attestations.NewPool(),
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
}
|
||||
@@ -508,7 +504,6 @@ func TestServer_SubscribeCommitteeSubnets_MultipleSlots(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
HeadFetcher: &mock.ChainService{State: state},
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
AttPool: attestations.NewPool(),
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
@@ -15,7 +14,6 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation"
|
||||
attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -83,26 +81,9 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
|
||||
func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (proposerAtts, proposerAtts) {
|
||||
validAtts := make([]*ethpb.Attestation, 0, len(a))
|
||||
invalidAtts := make([]*ethpb.Attestation, 0, len(a))
|
||||
var attestationProcessor func(context.Context, state.BeaconState, *ethpb.Attestation) (state.BeaconState, error)
|
||||
|
||||
if st.Version() == version.Phase0 {
|
||||
attestationProcessor = blocks.ProcessAttestationNoVerifySignature
|
||||
} else if st.Version() >= version.Altair {
|
||||
// Use a wrapper here, as go needs strong typing for the function signature.
|
||||
attestationProcessor = func(ctx context.Context, st state.BeaconState, attestation *ethpb.Attestation) (state.BeaconState, error) {
|
||||
totalBalance, err := helpers.TotalActiveBalance(st)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return altair.ProcessAttestationNoVerifySignature(ctx, st, attestation, totalBalance)
|
||||
}
|
||||
} else {
|
||||
// Exit early if there is an unknown state type.
|
||||
return validAtts, invalidAtts
|
||||
}
|
||||
|
||||
for _, att := range a {
|
||||
if _, err := attestationProcessor(ctx, st, att); err == nil {
|
||||
if err := blocks.VerifyAttestationNoVerifySignature(ctx, st, att); err == nil {
|
||||
validAtts = append(validAtts, att)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
|
||||
@@ -41,7 +42,6 @@ import (
|
||||
// and more.
|
||||
type Server struct {
|
||||
Ctx context.Context
|
||||
AttestationCache *cache.AttestationCache
|
||||
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
ForkFetcher blockchain.ForkFetcher
|
||||
@@ -74,6 +74,7 @@ type Server struct {
|
||||
BlockBuilder builder.BlockBuilder
|
||||
BLSChangesPool blstoexec.PoolManager
|
||||
ClockWaiter startup.ClockWaiter
|
||||
CoreService *core.Service
|
||||
}
|
||||
|
||||
// WaitForActivation checks if a validator public key exists in the active validator registry of the current
|
||||
|
||||
@@ -1,14 +1,11 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"golang.org/x/sync/errgroup"
|
||||
@@ -100,7 +97,14 @@ func (vs *Server) GetSyncCommitteeContribution(
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get head root: %v", err)
|
||||
}
|
||||
aggregatedSig, bits, err := vs.aggregatedSigAndAggregationBits(ctx, msgs, req.Slot, req.SubnetId, headRoot)
|
||||
sig, aggregatedBits, err := vs.CoreService.AggregatedSigAndAggregationBits(
|
||||
ctx,
|
||||
ðpb.AggregatedSigAndAggregationBitsRequest{
|
||||
Msgs: msgs,
|
||||
Slot: req.Slot,
|
||||
SubnetId: req.SubnetId,
|
||||
BlockRoot: headRoot,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get contribution data: %v", err)
|
||||
}
|
||||
@@ -108,8 +112,8 @@ func (vs *Server) GetSyncCommitteeContribution(
|
||||
Slot: req.Slot,
|
||||
BlockRoot: headRoot,
|
||||
SubcommitteeIndex: req.SubnetId,
|
||||
AggregationBits: bits,
|
||||
Signature: aggregatedSig,
|
||||
AggregationBits: aggregatedBits,
|
||||
Signature: sig,
|
||||
}
|
||||
|
||||
return contribution, nil
|
||||
@@ -120,7 +124,7 @@ func (vs *Server) GetSyncCommitteeContribution(
|
||||
func (vs *Server) SubmitSignedContributionAndProof(
|
||||
ctx context.Context, s *ethpb.SignedContributionAndProof,
|
||||
) (*emptypb.Empty, error) {
|
||||
err := core.SubmitSignedContributionAndProof(ctx, s, vs.P2P, vs.SyncCommitteePool, vs.OperationNotifier)
|
||||
err := vs.CoreService.SubmitSignedContributionAndProof(ctx, s)
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, status.Errorf(core.ErrorReasonToGRPC(err.Reason), err.Err.Error())
|
||||
}
|
||||
@@ -133,49 +137,12 @@ func (vs *Server) AggregatedSigAndAggregationBits(
|
||||
ctx context.Context,
|
||||
req *ethpb.AggregatedSigAndAggregationBitsRequest,
|
||||
) (*ethpb.AggregatedSigAndAggregationBitsResponse, error) {
|
||||
aggregatedSig, bits, err := vs.aggregatedSigAndAggregationBits(ctx, req.Msgs, req.Slot, req.SubnetId, req.BlockRoot)
|
||||
sig, aggregatedBits, err := vs.CoreService.AggregatedSigAndAggregationBits(ctx, req)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
return ðpb.AggregatedSigAndAggregationBitsResponse{AggregatedSig: aggregatedSig, Bits: bits}, nil
|
||||
}
|
||||
|
||||
func (vs *Server) aggregatedSigAndAggregationBits(
|
||||
ctx context.Context,
|
||||
msgs []*ethpb.SyncCommitteeMessage,
|
||||
slot primitives.Slot,
|
||||
subnetId uint64,
|
||||
blockRoot []byte,
|
||||
) ([]byte, []byte, error) {
|
||||
subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
sigs := make([][]byte, 0, subCommitteeSize)
|
||||
bits := ethpb.NewSyncCommitteeAggregationBits()
|
||||
for _, msg := range msgs {
|
||||
if bytes.Equal(blockRoot, msg.BlockRoot) {
|
||||
headSyncCommitteeIndices, err := vs.HeadFetcher.HeadSyncCommitteeIndices(ctx, msg.ValidatorIndex, slot)
|
||||
if err != nil {
|
||||
return []byte{}, nil, errors.Wrapf(err, "could not get sync subcommittee index")
|
||||
}
|
||||
for _, index := range headSyncCommitteeIndices {
|
||||
i := uint64(index)
|
||||
subnetIndex := i / subCommitteeSize
|
||||
indexMod := i % subCommitteeSize
|
||||
if subnetIndex == subnetId && !bits.BitAt(indexMod) {
|
||||
bits.SetBitAt(indexMod, true)
|
||||
sigs = append(sigs, msg.Signature)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
aggregatedSig := make([]byte, 96)
|
||||
aggregatedSig[0] = 0xC0
|
||||
if len(sigs) != 0 {
|
||||
uncompressedSigs, err := bls.MultipleSignaturesFromBytes(sigs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not decompress signatures")
|
||||
}
|
||||
aggregatedSig = bls.AggregateSignatures(uncompressedSigs).Marshal()
|
||||
}
|
||||
|
||||
return aggregatedSig, bits, nil
|
||||
return ðpb.AggregatedSigAndAggregationBitsResponse{
|
||||
AggregatedSig: sig,
|
||||
Bits: aggregatedBits,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
|
||||
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -101,14 +102,20 @@ func TestGetSyncSubcommitteeIndex_Ok(t *testing.T) {
|
||||
|
||||
func TestGetSyncCommitteeContribution_FiltersDuplicates(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateAltair(t, 10)
|
||||
syncCommitteePool := synccommittee.NewStore()
|
||||
headFetcher := &mock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{10},
|
||||
}
|
||||
server := &Server{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{10},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: syncCommitteePool,
|
||||
HeadFetcher: headFetcher,
|
||||
},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
SyncCommitteePool: syncCommitteePool,
|
||||
HeadFetcher: headFetcher,
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
}
|
||||
secKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
@@ -137,9 +144,11 @@ func TestGetSyncCommitteeContribution_FiltersDuplicates(t *testing.T) {
|
||||
|
||||
func TestSubmitSignedContributionAndProof_OK(t *testing.T) {
|
||||
server := &Server{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
Broadcaster: &mockp2p.MockBroadcaster{},
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
},
|
||||
}
|
||||
contribution := ðpb.SignedContributionAndProof{
|
||||
Message: ðpb.ContributionAndProof{
|
||||
@@ -151,21 +160,23 @@ func TestSubmitSignedContributionAndProof_OK(t *testing.T) {
|
||||
}
|
||||
_, err := server.SubmitSignedContributionAndProof(context.Background(), contribution)
|
||||
require.NoError(t, err)
|
||||
savedMsgs, err := server.SyncCommitteePool.SyncCommitteeContributions(1)
|
||||
savedMsgs, err := server.CoreService.SyncCommitteePool.SyncCommitteeContributions(1)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []*ethpb.SyncCommitteeContribution{contribution.Message.Contribution}, savedMsgs)
|
||||
}
|
||||
|
||||
func TestSubmitSignedContributionAndProof_Notification(t *testing.T) {
|
||||
server := &Server{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
Broadcaster: &mockp2p.MockBroadcaster{},
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
},
|
||||
}
|
||||
|
||||
// Subscribe to operation notifications.
|
||||
opChannel := make(chan *feed.Event, 1024)
|
||||
opSub := server.OperationNotifier.OperationFeed().Subscribe(opChannel)
|
||||
opSub := server.CoreService.OperationNotifier.OperationFeed().Subscribe(opChannel)
|
||||
defer opSub.Unsubscribe()
|
||||
|
||||
contribution := ðpb.SignedContributionAndProof{
|
||||
|
||||
@@ -26,6 +26,7 @@ go_test(
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/sync/initial-sync/testing:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -2,6 +2,7 @@ package validator
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
|
||||
)
|
||||
|
||||
@@ -11,4 +12,5 @@ type Server struct {
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
SyncChecker sync.Checker
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
CoreService *core.Service
|
||||
}
|
||||
|
||||
@@ -29,12 +29,6 @@ type ValidatorPerformanceResponse struct {
|
||||
|
||||
// GetValidatorPerformance is an HTTP handler for GetValidatorPerformance.
|
||||
func (vs *Server) GetValidatorPerformance(w http.ResponseWriter, r *http.Request) {
|
||||
if vs.SyncChecker.Syncing() {
|
||||
handleHTTPError(w, "Syncing", http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
ctx := r.Context()
|
||||
currSlot := vs.GenesisTimeFetcher.CurrentSlot()
|
||||
var req ValidatorPerformanceRequest
|
||||
if r.Body != http.NoBody {
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
@@ -42,14 +36,12 @@ func (vs *Server) GetValidatorPerformance(w http.ResponseWriter, r *http.Request
|
||||
return
|
||||
}
|
||||
}
|
||||
computed, err := core.ComputeValidatorPerformance(
|
||||
ctx,
|
||||
computed, err := vs.CoreService.ComputeValidatorPerformance(
|
||||
r.Context(),
|
||||
ðpb.ValidatorPerformanceRequest{
|
||||
PublicKeys: req.PublicKeys,
|
||||
Indices: req.Indices,
|
||||
},
|
||||
vs.HeadFetcher,
|
||||
currSlot,
|
||||
)
|
||||
if err != nil {
|
||||
handleHTTPError(w, "Could not compute validator performance: "+err.Err.Error(), core.ErrorReasonToHTTP(err.Reason))
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
@@ -28,12 +29,13 @@ import (
|
||||
func TestServer_GetValidatorPerformance(t *testing.T) {
|
||||
t.Run("Syncing", func(t *testing.T) {
|
||||
vs := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
CoreService: &core.Service{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
|
||||
req := httptest.NewRequest("POST", "/foo", &buf)
|
||||
req := httptest.NewRequest("POST", "/foo", nil)
|
||||
|
||||
client := &http.Client{}
|
||||
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
|
||||
@@ -57,11 +59,13 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
|
||||
|
||||
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
vs := &Server{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: headState,
|
||||
CoreService: &core.Service{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: headState,
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
want := &ValidatorPerformanceResponse{
|
||||
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},
|
||||
@@ -111,12 +115,14 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
|
||||
|
||||
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
vs := &Server{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
// 10 epochs into the future.
|
||||
State: headState,
|
||||
CoreService: &core.Service{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
// 10 epochs into the future.
|
||||
State: headState,
|
||||
},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
}
|
||||
c := headState.Copy()
|
||||
vp, bp, err := precompute.New(ctx, c)
|
||||
@@ -174,12 +180,14 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
|
||||
|
||||
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
vs := &Server{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
// 10 epochs into the future.
|
||||
State: headState,
|
||||
CoreService: &core.Service{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
// 10 epochs into the future.
|
||||
State: headState,
|
||||
},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
}
|
||||
c := headState.Copy()
|
||||
vp, bp, err := precompute.New(ctx, c)
|
||||
@@ -243,11 +251,13 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
|
||||
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
|
||||
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
vs := &Server{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: headState,
|
||||
CoreService: &core.Service{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: headState,
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
want := &ValidatorPerformanceResponse{
|
||||
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},
|
||||
@@ -303,11 +313,13 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
|
||||
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
|
||||
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
vs := &Server{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: headState,
|
||||
CoreService: &core.Service{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: headState,
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
want := &ValidatorPerformanceResponse{
|
||||
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},
|
||||
@@ -363,11 +375,13 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
|
||||
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
|
||||
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
vs := &Server{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: headState,
|
||||
CoreService: &core.Service{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: headState,
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
want := &ValidatorPerformanceResponse{
|
||||
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},
|
||||
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
|
||||
rpcBuilder "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/builder"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/debug"
|
||||
@@ -231,9 +232,19 @@ func (s *Service) Start() {
|
||||
}
|
||||
s.cfg.Router.HandleFunc("/eth/v1/builder/states/{state_id}/expected_withdrawals", builderServer.ExpectedWithdrawals).Methods(http.MethodGet)
|
||||
|
||||
coreService := &core.Service{
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
Broadcaster: s.cfg.Broadcaster,
|
||||
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
|
||||
OperationNotifier: s.cfg.OperationNotifier,
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
StateGen: s.cfg.StateGen,
|
||||
}
|
||||
|
||||
validatorServer := &validatorv1alpha1.Server{
|
||||
Ctx: s.ctx,
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
AttPool: s.cfg.AttestationsPool,
|
||||
ExitPool: s.cfg.ExitPool,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
@@ -266,6 +277,7 @@ func (s *Service) Start() {
|
||||
BlockBuilder: s.cfg.BlockBuilder,
|
||||
BLSChangesPool: s.cfg.BLSChangesPool,
|
||||
ClockWaiter: s.cfg.ClockWaiter,
|
||||
CoreService: coreService,
|
||||
}
|
||||
validatorServerV1 := &validator.Server{
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
@@ -283,11 +295,16 @@ func (s *Service) Start() {
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
BlockBuilder: s.cfg.BlockBuilder,
|
||||
OperationNotifier: s.cfg.OperationNotifier,
|
||||
CoreService: coreService,
|
||||
}
|
||||
|
||||
s.cfg.Router.HandleFunc("/eth/v1/validator/aggregate_attestation", validatorServerV1.GetAggregateAttestation).Methods(http.MethodGet)
|
||||
s.cfg.Router.HandleFunc("/eth/v1/validator/contribution_and_proofs", validatorServerV1.SubmitContributionAndProofs).Methods(http.MethodPost)
|
||||
s.cfg.Router.HandleFunc("/eth/v1/validator/aggregate_and_proofs", validatorServerV1.SubmitAggregateAndProofs).Methods(http.MethodPost)
|
||||
s.cfg.Router.HandleFunc("/eth/v1/validator/sync_committee_contribution", validatorServerV1.ProduceSyncCommitteeContribution).Methods(http.MethodGet)
|
||||
s.cfg.Router.HandleFunc("/eth/v1/validator/sync_committee_subscriptions", validatorServerV1.SubmitSyncCommitteeSubscription).Methods(http.MethodPost)
|
||||
s.cfg.Router.HandleFunc("/eth/v1/validator/beacon_committee_subscriptions", validatorServerV1.SubmitBeaconCommitteeSubscription).Methods(http.MethodPost)
|
||||
s.cfg.Router.HandleFunc("/eth/v1/validator/attestation_data", validatorServerV1.GetAttestationData).Methods(http.MethodGet)
|
||||
|
||||
nodeServer := &nodev1alpha1.Server{
|
||||
LogsStreamer: logs.NewStreamServer(),
|
||||
@@ -303,7 +320,7 @@ func (s *Service) Start() {
|
||||
BeaconMonitoringHost: s.cfg.BeaconMonitoringHost,
|
||||
BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
|
||||
}
|
||||
nodeServerV1 := &node.Server{
|
||||
nodeServerEth := &node.Server{
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
Server: s.grpcServer,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
@@ -316,6 +333,8 @@ func (s *Service) Start() {
|
||||
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
|
||||
}
|
||||
|
||||
s.cfg.Router.HandleFunc("/eth/v1/node/syncing", nodeServerEth.GetSyncStatus).Methods(http.MethodGet)
|
||||
|
||||
nodeServerPrysm := &nodeprysm.Server{
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
@@ -354,6 +373,7 @@ func (s *Service) Start() {
|
||||
ReceivedAttestationsBuffer: make(chan *ethpbv1alpha1.Attestation, attestationBufferSize),
|
||||
CollectedAttestationsBuffer: make(chan []*ethpbv1alpha1.Attestation, attestationBufferSize),
|
||||
ReplayerBuilder: ch,
|
||||
CoreService: coreService,
|
||||
}
|
||||
beaconChainServerV1 := &beacon.Server{
|
||||
CanonicalHistory: ch,
|
||||
@@ -384,12 +404,16 @@ func (s *Service) Start() {
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
CoreService: coreService,
|
||||
}
|
||||
s.cfg.Router.HandleFunc("/prysm/validators/performance", httpServer.GetValidatorPerformance).Methods(http.MethodPost)
|
||||
s.cfg.Router.HandleFunc("/eth/v2/beacon/blocks", beaconChainServerV1.PublishBlockV2).Methods(http.MethodPost)
|
||||
s.cfg.Router.HandleFunc("/eth/v2/beacon/blinded_blocks", beaconChainServerV1.PublishBlindedBlockV2).Methods(http.MethodPost)
|
||||
s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/attestations", beaconChainServerV1.ListAttestations).Methods(http.MethodGet)
|
||||
s.cfg.Router.HandleFunc("/eth/v1/beacon/pool/attestations", beaconChainServerV1.SubmitAttestations).Methods(http.MethodPost)
|
||||
|
||||
ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
|
||||
ethpbservice.RegisterBeaconNodeServer(s.grpcServer, nodeServerV1)
|
||||
ethpbservice.RegisterBeaconNodeServer(s.grpcServer, nodeServerEth)
|
||||
ethpbv1alpha1.RegisterHealthServer(s.grpcServer, nodeServer)
|
||||
ethpbv1alpha1.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)
|
||||
ethpbservice.RegisterBeaconChainServer(s.grpcServer, beaconChainServerV1)
|
||||
|
||||
@@ -5,6 +5,7 @@ package state
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
@@ -23,6 +24,7 @@ type BeaconState interface {
|
||||
CopyAllTries()
|
||||
HashTreeRoot(ctx context.Context) ([32]byte, error)
|
||||
StateProver
|
||||
json.Marshaler
|
||||
}
|
||||
|
||||
// SpecParametersProvider provides fork-specific configuration parameters as
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"sync"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
@@ -60,3 +61,76 @@ type BeaconState struct {
|
||||
merkleLayers [][][]byte
|
||||
sharedFieldReferences map[types.FieldIndex]*stateutil.Reference
|
||||
}
|
||||
|
||||
type beaconStateMarshalable struct {
|
||||
Version int `json:"version" yaml:"version"`
|
||||
GenesisTime uint64 `json:"genesis_time" yaml:"genesis_time"`
|
||||
GenesisValidatorsRoot [32]byte `json:"genesis_validators_root" yaml:"genesis_validators_root"`
|
||||
Slot primitives.Slot `json:"slot" yaml:"slot"`
|
||||
Fork *ethpb.Fork `json:"fork" yaml:"fork"`
|
||||
LatestBlockHeader *ethpb.BeaconBlockHeader `json:"latest_block_header" yaml:"latest_block_header"`
|
||||
BlockRoots *customtypes.BlockRoots `json:"block_roots" yaml:"block_roots"`
|
||||
StateRoots *customtypes.StateRoots `json:"state_roots" yaml:"state_roots"`
|
||||
HistoricalRoots customtypes.HistoricalRoots `json:"historical_roots" yaml:"historical_roots"`
|
||||
HistoricalSummaries []*ethpb.HistoricalSummary `json:"historical_summaries" yaml:"historical_summaries"`
|
||||
Eth1Data *ethpb.Eth1Data `json:"eth_1_data" yaml:"eth_1_data"`
|
||||
Eth1DataVotes []*ethpb.Eth1Data `json:"eth_1_data_votes" yaml:"eth_1_data_votes"`
|
||||
Eth1DepositIndex uint64 `json:"eth_1_deposit_index" yaml:"eth_1_deposit_index"`
|
||||
Validators []*ethpb.Validator `json:"validators" yaml:"validators"`
|
||||
Balances []uint64 `json:"balances" yaml:"balances"`
|
||||
RandaoMixes *customtypes.RandaoMixes `json:"randao_mixes" yaml:"randao_mixes"`
|
||||
Slashings []uint64 `json:"slashings" yaml:"slashings"`
|
||||
PreviousEpochAttestations []*ethpb.PendingAttestation `json:"previous_epoch_attestations" yaml:"previous_epoch_attestations"`
|
||||
CurrentEpochAttestations []*ethpb.PendingAttestation `json:"current_epoch_attestations" yaml:"current_epoch_attestations"`
|
||||
PreviousEpochParticipation []byte `json:"previous_epoch_participation" yaml:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation []byte `json:"current_epoch_participation" yaml:"current_epoch_participation"`
|
||||
JustificationBits bitfield.Bitvector4 `json:"justification_bits" yaml:"justification_bits"`
|
||||
PreviousJustifiedCheckpoint *ethpb.Checkpoint `json:"previous_justified_checkpoint" yaml:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *ethpb.Checkpoint `json:"current_justified_checkpoint" yaml:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *ethpb.Checkpoint `json:"finalized_checkpoint" yaml:"finalized_checkpoint"`
|
||||
InactivityScores []uint64 `json:"inactivity_scores" yaml:"inactivity_scores"`
|
||||
CurrentSyncCommittee *ethpb.SyncCommittee `json:"current_sync_committee" yaml:"current_sync_committee"`
|
||||
NextSyncCommittee *ethpb.SyncCommittee `json:"next_sync_committee" yaml:"next_sync_committee"`
|
||||
LatestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader `json:"latest_execution_payload_header" yaml:"latest_execution_payload_header"`
|
||||
LatestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella `json:"latest_execution_payload_header_capella" yaml:"latest_execution_payload_header_capella"`
|
||||
NextWithdrawalIndex uint64 `json:"next_withdrawal_index" yaml:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex primitives.ValidatorIndex `json:"next_withdrawal_validator_index" yaml:"next_withdrawal_validator_index"`
|
||||
}
|
||||
|
||||
func (b *BeaconState) MarshalJSON() ([]byte, error) {
|
||||
marshalable := &beaconStateMarshalable{
|
||||
Version: b.version,
|
||||
GenesisTime: b.genesisTime,
|
||||
GenesisValidatorsRoot: b.genesisValidatorsRoot,
|
||||
Slot: b.slot,
|
||||
Fork: b.fork,
|
||||
LatestBlockHeader: b.latestBlockHeader,
|
||||
BlockRoots: b.blockRoots,
|
||||
StateRoots: b.stateRoots,
|
||||
HistoricalRoots: b.historicalRoots,
|
||||
HistoricalSummaries: b.historicalSummaries,
|
||||
Eth1Data: b.eth1Data,
|
||||
Eth1DataVotes: b.eth1DataVotes,
|
||||
Eth1DepositIndex: b.eth1DepositIndex,
|
||||
Validators: b.validators,
|
||||
Balances: b.balances,
|
||||
RandaoMixes: b.randaoMixes,
|
||||
Slashings: b.slashings,
|
||||
PreviousEpochAttestations: b.previousEpochAttestations,
|
||||
CurrentEpochAttestations: b.currentEpochAttestations,
|
||||
PreviousEpochParticipation: b.previousEpochParticipation,
|
||||
CurrentEpochParticipation: b.currentEpochParticipation,
|
||||
JustificationBits: b.justificationBits,
|
||||
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint,
|
||||
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint,
|
||||
FinalizedCheckpoint: b.finalizedCheckpoint,
|
||||
InactivityScores: b.inactivityScores,
|
||||
CurrentSyncCommittee: b.currentSyncCommittee,
|
||||
NextSyncCommittee: b.nextSyncCommittee,
|
||||
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeader,
|
||||
LatestExecutionPayloadHeaderCapella: b.latestExecutionPayloadHeaderCapella,
|
||||
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
}
|
||||
return json.Marshal(marshalable)
|
||||
}
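The hunk above adds a json.Marshaler implementation to the native BeaconState. For illustration, a minimal, self-contained sketch of what that enables: any value implementing json.Marshaler can be serialized with plain encoding/json. The fakeState type and dumpState helper below are stand-ins for this sketch, not part of Prysm.

package main

import (
	"encoding/json"
	"fmt"
)

// dumpState works with anything implementing json.Marshaler, such as the
// native BeaconState once the change above lands.
func dumpState(m json.Marshaler) (string, error) {
	b, err := json.Marshal(m) // dispatches to the value's MarshalJSON
	if err != nil {
		return "", err
	}
	return string(b), nil
}

// fakeState is a stand-in type for illustration only.
type fakeState struct{ Slot uint64 }

func (f fakeState) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]uint64{"slot": f.Slot})
}

func main() {
	out, _ := dumpState(fakeState{Slot: 42})
	fmt.Println(out) // {"slot":42}
}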
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"sync"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
@@ -60,3 +61,76 @@ type BeaconState struct {
|
||||
merkleLayers [][][]byte
|
||||
sharedFieldReferences map[types.FieldIndex]*stateutil.Reference
|
||||
}
|
||||
|
||||
type beaconStateMarshalable struct {
|
||||
Version int `json:"version" yaml:"version"`
|
||||
GenesisTime uint64 `json:"genesis_time" yaml:"genesis_time"`
|
||||
GenesisValidatorsRoot [32]byte `json:"genesis_validators_root" yaml:"genesis_validators_root"`
|
||||
Slot primitives.Slot `json:"slot" yaml:"slot"`
|
||||
Fork *ethpb.Fork `json:"fork" yaml:"fork"`
|
||||
LatestBlockHeader *ethpb.BeaconBlockHeader `json:"latest_block_header" yaml:"latest_block_header"`
|
||||
BlockRoots *customtypes.BlockRoots `json:"block_roots" yaml:"block_roots"`
|
||||
StateRoots *customtypes.StateRoots `json:"state_roots" yaml:"state_roots"`
|
||||
HistoricalRoots customtypes.HistoricalRoots `json:"historical_roots" yaml:"historical_roots"`
|
||||
HistoricalSummaries []*ethpb.HistoricalSummary `json:"historical_summaries" yaml:"historical_summaries"`
|
||||
Eth1Data *ethpb.Eth1Data `json:"eth_1_data" yaml:"eth_1_data"`
|
||||
Eth1DataVotes []*ethpb.Eth1Data `json:"eth_1_data_votes" yaml:"eth_1_data_votes"`
|
||||
Eth1DepositIndex uint64 `json:"eth_1_deposit_index" yaml:"eth_1_deposit_index"`
|
||||
Validators []*ethpb.Validator `json:"validators" yaml:"validators"`
|
||||
Balances []uint64 `json:"balances" yaml:"balances"`
|
||||
RandaoMixes *customtypes.RandaoMixes `json:"randao_mixes" yaml:"randao_mixes"`
|
||||
Slashings []uint64 `json:"slashings" yaml:"slashings"`
|
||||
PreviousEpochAttestations []*ethpb.PendingAttestation `json:"previous_epoch_attestations" yaml:"previous_epoch_attestations"`
|
||||
CurrentEpochAttestations []*ethpb.PendingAttestation `json:"current_epoch_attestations" yaml:"current_epoch_attestations"`
|
||||
PreviousEpochParticipation []byte `json:"previous_epoch_participation" yaml:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation []byte `json:"current_epoch_participation" yaml:"current_epoch_participation"`
|
||||
JustificationBits bitfield.Bitvector4 `json:"justification_bits" yaml:"justification_bits"`
|
||||
PreviousJustifiedCheckpoint *ethpb.Checkpoint `json:"previous_justified_checkpoint" yaml:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *ethpb.Checkpoint `json:"current_justified_checkpoint" yaml:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *ethpb.Checkpoint `json:"finalized_checkpoint" yaml:"finalized_checkpoint"`
|
||||
InactivityScores []uint64 `json:"inactivity_scores" yaml:"inactivity_scores"`
|
||||
CurrentSyncCommittee *ethpb.SyncCommittee `json:"current_sync_committee" yaml:"current_sync_committee"`
|
||||
NextSyncCommittee *ethpb.SyncCommittee `json:"next_sync_committee" yaml:"next_sync_committee"`
|
||||
LatestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader `json:"latest_execution_payload_header" yaml:"latest_execution_payload_header"`
|
||||
LatestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella `json:"latest_execution_payload_header_capella" yaml:"latest_execution_payload_header_capella"`
|
||||
NextWithdrawalIndex uint64 `json:"next_withdrawal_index" yaml:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex primitives.ValidatorIndex `json:"next_withdrawal_validator_index" yaml:"next_withdrawal_validator_index"`
|
||||
}
|
||||
|
||||
func (b *BeaconState) MarshalJSON() ([]byte, error) {
|
||||
marshalable := &beaconStateMarshalable{
|
||||
Version: b.version,
|
||||
GenesisTime: b.genesisTime,
|
||||
GenesisValidatorsRoot: b.genesisValidatorsRoot,
|
||||
Slot: b.slot,
|
||||
Fork: b.fork,
|
||||
LatestBlockHeader: b.latestBlockHeader,
|
||||
BlockRoots: b.blockRoots,
|
||||
StateRoots: b.stateRoots,
|
||||
HistoricalRoots: b.historicalRoots,
|
||||
HistoricalSummaries: b.historicalSummaries,
|
||||
Eth1Data: b.eth1Data,
|
||||
Eth1DataVotes: b.eth1DataVotes,
|
||||
Eth1DepositIndex: b.eth1DepositIndex,
|
||||
Validators: b.validators,
|
||||
Balances: b.balances,
|
||||
RandaoMixes: b.randaoMixes,
|
||||
Slashings: b.slashings,
|
||||
PreviousEpochAttestations: b.previousEpochAttestations,
|
||||
CurrentEpochAttestations: b.currentEpochAttestations,
|
||||
PreviousEpochParticipation: b.previousEpochParticipation,
|
||||
CurrentEpochParticipation: b.currentEpochParticipation,
|
||||
JustificationBits: b.justificationBits,
|
||||
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint,
|
||||
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint,
|
||||
FinalizedCheckpoint: b.finalizedCheckpoint,
|
||||
InactivityScores: b.inactivityScores,
|
||||
CurrentSyncCommittee: b.currentSyncCommittee,
|
||||
NextSyncCommittee: b.nextSyncCommittee,
|
||||
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeader,
|
||||
LatestExecutionPayloadHeaderCapella: b.latestExecutionPayloadHeaderCapella,
|
||||
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
}
|
||||
return json.Marshal(marshalable)
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ go_library(
|
||||
srcs = [
|
||||
"bazel.go",
|
||||
"data_path.go",
|
||||
"non_bazel.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/build/bazel",
|
||||
visibility = ["//visibility:public"],
|
||||
|
||||
cache/nonblocking/lru.go (vendored, 23 changes)
@@ -18,6 +18,7 @@ type LRU[K comparable, V any] struct {
|
||||
evictList *lruList[K, V]
|
||||
items map[K]*entry[K, V]
|
||||
onEvict EvictCallback[K, V]
|
||||
getChan chan *entry[K, V]
|
||||
}
|
||||
|
||||
// NewLRU constructs an LRU of the given size
|
||||
@@ -25,13 +26,19 @@ func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K,
|
||||
if size <= 0 {
|
||||
return nil, errors.New("must provide a positive size")
|
||||
}
|
||||
// Initialize the channel buffer size to 10% of the cache size.
|
||||
chanSize := size / 10
|
||||
|
||||
c := &LRU[K, V]{
|
||||
size: size,
|
||||
evictList: newList[K, V](),
|
||||
items: make(map[K]*entry[K, V]),
|
||||
onEvict: onEvict,
|
||||
getChan: make(chan *entry[K, V], chanSize),
|
||||
}
|
||||
// Spin off a separate goroutine to handle evict list
// operations.
|
||||
go c.handleGetRequests()
|
||||
return c, nil
|
||||
}
|
||||
|
||||
@@ -77,12 +84,7 @@ func (c *LRU[K, V]) Get(key K) (value V, ok bool) {
|
||||
c.itemsLock.RUnlock()
|
||||
|
||||
// Make this get function non-blocking for multiple readers.
|
||||
go func() {
|
||||
c.evictListLock.Lock()
|
||||
c.evictList.moveToFront(ent)
|
||||
c.evictListLock.Unlock()
|
||||
}()
|
||||
|
||||
c.getChan <- ent
|
||||
return ent.value, true
|
||||
}
|
||||
c.itemsLock.RUnlock()
|
||||
@@ -133,3 +135,12 @@ func (c *LRU[K, V]) removeElement(e *entry[K, V]) {
|
||||
c.onEvict(e.key, e.value)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *LRU[K, V]) handleGetRequests() {
|
||||
for {
|
||||
entry := <-c.getChan
|
||||
c.evictListLock.Lock()
|
||||
c.evictList.moveToFront(entry)
|
||||
c.evictListLock.Unlock()
|
||||
}
|
||||
}
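The diff above makes Get non-blocking by handing recency updates to a single worker goroutine through a buffered channel instead of taking the evict-list lock on the read path. A minimal sketch of that pattern follows; the names are illustrative, not the cache's real API.

package main

import (
	"fmt"
	"sync"
)

type recencyCache struct {
	mu      sync.RWMutex
	items   map[string]int
	touches chan string // keys whose recency should be refreshed
}

func newRecencyCache() *recencyCache {
	c := &recencyCache{
		items:   map[string]int{"a": 1},
		touches: make(chan string, 16), // buffered, like getChan above
	}
	go c.handleTouches() // single worker serializes recency updates
	return c
}

// Get only takes the read lock; recency bookkeeping happens in the worker.
func (c *recencyCache) Get(key string) (int, bool) {
	c.mu.RLock()
	v, ok := c.items[key]
	c.mu.RUnlock()
	if ok {
		c.touches <- key // cheap hand-off instead of locking the recency list here
	}
	return v, ok
}

func (c *recencyCache) handleTouches() {
	for key := range c.touches {
		// In the real cache this is where evictList.moveToFront runs,
		// guarded by the evict-list lock and serialized by this goroutine.
		_ = key
	}
}

func main() {
	c := newRecencyCache()
	fmt.Println(c.Get("a")) // 1 true
}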
|
||||
|
||||
@@ -2,7 +2,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary")
|
||||
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
|
||||
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle", "container_image")
|
||||
load("//tools:go_image.bzl", "go_image_alpine", "go_image_debug")
|
||||
load("//tools:go_image.bzl", "go_image_debug")
|
||||
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
|
||||
|
||||
go_library(
|
||||
@@ -46,7 +46,6 @@ go_library(
|
||||
go_image(
|
||||
name = "image",
|
||||
base = select({
|
||||
"//tools:base_image_alpine": "//tools:alpine_cc_image",
|
||||
"//tools:base_image_cc": "//tools:cc_image",
|
||||
"//conditions:default": "//tools:cc_image",
|
||||
}),
|
||||
@@ -93,25 +92,6 @@ container_bundle(
|
||||
visibility = ["//beacon-chain:__pkg__"],
|
||||
)
|
||||
|
||||
go_image_alpine(
|
||||
name = "image_alpine",
|
||||
image = ":image",
|
||||
tags = ["manual"],
|
||||
visibility = ["//beacon-chain:__pkg__"],
|
||||
)
|
||||
|
||||
container_bundle(
|
||||
name = "image_bundle_alpine",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest-alpine": ":image_alpine",
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-alpine": ":image_alpine",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest-alpine": ":image_alpine",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}-alpine": ":image_alpine",
|
||||
},
|
||||
tags = ["manual"],
|
||||
visibility = ["//beacon-chain:__pkg__"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images",
|
||||
bundle = ":image_bundle",
|
||||
@@ -126,13 +106,6 @@ docker_push(
|
||||
visibility = ["//beacon-chain:__pkg__"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images_alpine",
|
||||
bundle = ":image_bundle_alpine",
|
||||
tags = ["manual"],
|
||||
visibility = ["//beacon-chain:__pkg__"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "beacon-chain",
|
||||
embed = [":go_default_library"],
|
||||
|
||||
@@ -25,9 +25,10 @@ var (
|
||||
Usage: "Number of total skip slot to fallback from using relay/builder to local execution engine for block construction in last epoch rolling window",
|
||||
Value: 8,
|
||||
}
|
||||
LocalBlockValueBoost = &cli.Float64Flag{
|
||||
// LocalBlockValueBoost sets a percentage boost for local block construction while using a custom builder.
|
||||
LocalBlockValueBoost = &cli.Uint64Flag{
|
||||
Name: "local-block-value-boost",
|
||||
Usage: "A percentage boost for local block construction. This is used to prioritize local block construction over relay/builder block construction" +
|
||||
Usage: "A percentage boost for local block construction as a Uint64. This is used to prioritize local block construction over relay/builder block construction" +
|
||||
"Boost is an additional percentage to multiple local block value. Use builder block if: builder_bid_value * 100 > local_block_value * (local-block-value-boost + 100)",
|
||||
}
|
||||
// ExecutionEngineEndpoint provides an HTTP access endpoint to connect to an execution client on the execution layer
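The usage string above spells out the comparison this flag feeds into. A small sketch of that arithmetic, with illustrative names rather than Prysm's actual function:

package main

import "fmt"

// useBuilderBlock applies the rule from the flag's usage string:
// builder_bid_value * 100 > local_block_value * (local-block-value-boost + 100).
func useBuilderBlock(builderBidValue, localBlockValue, localBoostPercent uint64) bool {
	return builderBidValue*100 > localBlockValue*(localBoostPercent+100)
}

func main() {
	// With a 10% local boost, the builder bid must beat the local value by more than 10%.
	fmt.Println(useBuilderBlock(111, 100, 10)) // true
	fmt.Println(useBuilderBlock(110, 100, 10)) // false: 11000 is not greater than 11000
}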
|
||||
|
||||
@@ -2,7 +2,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_binary")
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
|
||||
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle", "container_image")
|
||||
load("//tools:go_image.bzl", "go_image_alpine", "go_image_debug")
|
||||
load("//tools:go_image.bzl", "go_image_debug")
|
||||
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
|
||||
|
||||
go_library(
|
||||
@@ -26,7 +26,6 @@ go_library(
|
||||
go_image(
|
||||
name = "image",
|
||||
base = select({
|
||||
"//tools:base_image_alpine": "//tools:alpine_cc_image",
|
||||
"//tools:base_image_cc": "//tools:cc_image",
|
||||
"//conditions:default": "//tools:cc_image",
|
||||
}),
|
||||
@@ -69,23 +68,6 @@ container_bundle(
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
go_image_alpine(
|
||||
name = "image_alpine",
|
||||
image = ":image",
|
||||
tags = ["manual"],
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
container_bundle(
|
||||
name = "image_bundle_alpine",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/cmd/prysmctl:latest-alpine": ":image_alpine",
|
||||
"gcr.io/prysmaticlabs/prysm/cmd/prysmctl:{DOCKER_TAG}-alpine": ":image_alpine",
|
||||
},
|
||||
tags = ["manual"],
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images",
|
||||
bundle = ":image_bundle",
|
||||
@@ -100,13 +82,6 @@ docker_push(
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images_alpine",
|
||||
bundle = ":image_bundle_alpine",
|
||||
tags = ["manual"],
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "prysmctl",
|
||||
embed = [":go_default_library"],
|
||||
|
||||
@@ -2,7 +2,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary")
|
||||
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
|
||||
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle", "container_image")
|
||||
load("//tools:go_image.bzl", "go_image_alpine", "go_image_debug")
|
||||
load("//tools:go_image.bzl", "go_image_debug")
|
||||
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
|
||||
|
||||
go_library(
|
||||
@@ -41,7 +41,6 @@ go_library(
|
||||
go_image(
|
||||
name = "image",
|
||||
base = select({
|
||||
"//tools:base_image_alpine": "//tools:alpine_cc_image",
|
||||
"//tools:base_image_cc": "//tools:cc_image",
|
||||
"//conditions:default": "//tools:cc_image",
|
||||
}),
|
||||
@@ -88,25 +87,6 @@ container_bundle(
|
||||
visibility = ["//validator:__pkg__"],
|
||||
)
|
||||
|
||||
go_image_alpine(
|
||||
name = "image_alpine",
|
||||
image = ":image",
|
||||
tags = ["manual"],
|
||||
visibility = ["//validator:__pkg__"],
|
||||
)
|
||||
|
||||
container_bundle(
|
||||
name = "image_bundle_alpine",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/validator:latest-alpine": ":image_alpine",
|
||||
"gcr.io/prysmaticlabs/prysm/validator:{DOCKER_TAG}-alpine": ":image_alpine",
|
||||
"index.docker.io/prysmaticlabs/prysm-validator:latest-alpine": ":image_alpine",
|
||||
"index.docker.io/prysmaticlabs/prysm-validator:{DOCKER_TAG}-alpine": ":image_alpine",
|
||||
},
|
||||
tags = ["manual"],
|
||||
visibility = ["//validator:__pkg__"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images",
|
||||
bundle = ":image_bundle",
|
||||
@@ -121,13 +101,6 @@ docker_push(
|
||||
visibility = ["//validator:__pkg__"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images_alpine",
|
||||
bundle = ":image_bundle_alpine",
|
||||
tags = ["manual"],
|
||||
visibility = ["//validator:__pkg__"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "validator",
|
||||
embed = [":go_default_library"],
|
||||
|
||||
@@ -40,8 +40,7 @@ func TestLoadFlagsFromConfig_PreProcessing_Web3signer(t *testing.T) {
|
||||
return cmd.LoadFlagsFromConfig(cliCtx, comFlags)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
//TODO: https://github.com/urfave/cli/issues/1197 right now does not set flag
|
||||
require.Equal(t, false, cliCtx.IsSet(Web3SignerPublicValidatorKeysFlag.Name))
|
||||
require.Equal(t, true, cliCtx.IsSet(Web3SignerPublicValidatorKeysFlag.Name))
|
||||
|
||||
require.Equal(t, strings.Join([]string{pubkey1, pubkey2}, ","),
|
||||
strings.Join(cliCtx.StringSlice(Web3SignerPublicValidatorKeysFlag.Name), ","))
|
||||
|
||||
@@ -230,9 +230,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
|
||||
logEnabled(disableBuildBlockParallel)
|
||||
cfg.BuildBlockParallel = false
|
||||
}
|
||||
if ctx.IsSet(aggregateParallel.Name) {
|
||||
logEnabled(aggregateParallel)
|
||||
cfg.AggregateParallel = true
|
||||
cfg.AggregateParallel = true
|
||||
if ctx.IsSet(disableAggregateParallel.Name) {
|
||||
logEnabled(disableAggregateParallel)
|
||||
cfg.AggregateParallel = false
|
||||
}
|
||||
if ctx.IsSet(disableResourceManager.Name) {
|
||||
logEnabled(disableResourceManager)
|
||||
|
||||
@@ -38,6 +38,11 @@ var (
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedAggregateParallel = &cli.BoolFlag{
|
||||
Name: "aggregate-parallel",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
)
|
||||
|
||||
// Deprecated flags for both the beacon node and validator client.
|
||||
@@ -48,6 +53,7 @@ var deprecatedFlags = []cli.Flag{
|
||||
deprecatedDisableGossipBatchAggregation,
|
||||
deprecatedBuildBlockParallel,
|
||||
deprecatedEnableRegistrationCache,
|
||||
deprecatedAggregateParallel,
|
||||
}
|
||||
|
||||
// deprecatedBeaconFlags contains flags that are still used by other components
|
||||
|
||||
@@ -155,9 +155,9 @@ var (
|
||||
Usage: "A temporary flag for disabling the validator registration cache instead of using the db. note: registrations do not clear on restart while using the db",
|
||||
}
|
||||
|
||||
aggregateParallel = &cli.BoolFlag{
|
||||
Name: "aggregate-parallel",
|
||||
Usage: "Enables parallel aggregation of attestations",
|
||||
disableAggregateParallel = &cli.BoolFlag{
|
||||
Name: "disable-aggregate-parallel",
|
||||
Usage: "Disables parallel aggregation of attestations",
|
||||
}
|
||||
)
|
||||
|
||||
@@ -212,7 +212,7 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c
|
||||
aggregateThirdInterval,
|
||||
disableResourceManager,
|
||||
DisableRegistrationCache,
|
||||
aggregateParallel,
|
||||
disableAggregateParallel,
|
||||
}...)...)
|
||||
|
||||
// E2EBeaconChainFlags contains a list of the beacon chain feature flags to be tested in E2E.
|
||||
|
||||
@@ -15,7 +15,6 @@ go_test(
|
||||
srcs = [
|
||||
"common_test.go",
|
||||
"mainnet_test.go",
|
||||
"minimal_test.go",
|
||||
],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
|
||||
@@ -49,11 +49,11 @@ go_test(
|
||||
"testnet_prater_config_test.go",
|
||||
],
|
||||
data = glob(["*.yaml"]) + [
|
||||
"testdata/e2e_config.yaml",
|
||||
"@consensus_spec//:spec_data",
|
||||
"@consensus_spec_tests_mainnet//:test_data",
|
||||
"@consensus_spec_tests_minimal//:test_data",
|
||||
"@eth2_networks//:configs",
|
||||
"testdata/e2e_config.yaml",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
gotags = ["develop"],
|
||||
|
||||
@@ -2,9 +2,13 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["custom_types.go"],
|
||||
srcs = [
|
||||
"custom_types.go",
|
||||
"types.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/consensus-types/validator",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["//consensus-types/primitives:go_default_library"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
|
||||
consensus-types/validator/types.go (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
)
|
||||
|
||||
type ValidatorStatus int8
|
||||
|
||||
const (
|
||||
PendingInitialized ValidatorStatus = iota
|
||||
PendingQueued
|
||||
ActiveOngoing
|
||||
ActiveExiting
|
||||
ActiveSlashed
|
||||
ExitedUnslashed
|
||||
ExitedSlashed
|
||||
WithdrawalPossible
|
||||
WithdrawalDone
|
||||
Active
|
||||
Pending
|
||||
Exited
|
||||
Withdrawal
|
||||
)
|
||||
|
||||
type SyncCommitteeSubscription struct {
|
||||
ValidatorIndex primitives.ValidatorIndex
|
||||
SyncCommitteeIndices []uint64
|
||||
UntilEpoch primitives.Epoch
|
||||
}
|
||||
|
||||
type BeaconCommitteeSubscription struct {
|
||||
ValidatorIndex primitives.ValidatorIndex
|
||||
CommitteeIndex primitives.CommitteeIndex
|
||||
CommitteesAtSlot uint64
|
||||
Slot primitives.Slot
|
||||
IsAggregator bool
|
||||
}
|
||||
container/multi-value-slice/BUILD.bazel (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["multi_value_slice.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/container/multi-value-slice",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//container/multi-value-slice/interfaces:go_default_library",
|
||||
"@com_github_google_uuid//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["multi_value_slice_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_google_uuid//:go_default_library",
|
||||
],
|
||||
)
|
||||
container/multi-value-slice/interfaces/BUILD.bazel (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["interfaces.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/container/multi-value-slice/interfaces",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["@com_github_google_uuid//:go_default_library"],
|
||||
)
|
||||
container/multi-value-slice/interfaces/interfaces.go (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
package interfaces
|
||||
|
||||
import "github.com/google/uuid"
|
||||
|
||||
// Identifiable represents an object that can be uniquely identified by its Id.
|
||||
type Identifiable interface {
|
||||
Id() uuid.UUID
|
||||
SetId(id uuid.UUID)
|
||||
}
|
||||
container/multi-value-slice/multi_value_slice.go (new file, 488 lines)
@@ -0,0 +1,488 @@
|
||||
// Package mvslice defines a multi value slice container. The purpose of the container is to be a replacement for a slice
|
||||
// in scenarios where many objects of the same type share a copy of an identical or nearly identical slice.
|
||||
// In such a case, using the multi value slice should result in less memory allocation because many values of the slice can be shared between objects.
|
||||
//
|
||||
// The multi value slice should be initialized by calling the Init function and passing the initial values of the slice.
|
||||
// After initializing the slice, it can be shared between objects by using the Copy function.
|
||||
// Note that simply assigning the same multi value slice to several objects is not enough for it to work properly.
|
||||
// Calling Copy is required in most circumstances (an exception is when the source object has only shared values).
|
||||
//
|
||||
// s := &Slice[int, *testObject]{}
|
||||
// s.Init([]int{1, 2, 3})
|
||||
// src := &testObject{id: id1, slice: s} // id1 is some UUID
|
||||
// dst := &testObject{id: id2, slice: s} // id2 is some UUID
|
||||
// s.Copy(src, dst)
|
||||
//
|
||||
// Each Value stores a value of type V along with identifiers to objects that have this value.
|
||||
// A MultiValueItem is a slice of Value elements. A Slice contains shared items, individual items and appended items.
|
||||
//
|
||||
// You can think of a shared value as the original value (i.e. the value at the point in time when the multi value slice was constructed),
|
||||
// and of an individual value as a changed value.
|
||||
// There is no notion of a shared appended value because appended values never have an original value (appended values are empty when the slice is created).
|
||||
//
|
||||
// Whenever any of the slice’s functions (apart from Init) is called, the function needs to know which object it is dealing with.
|
||||
// This is because if an object has an individual/appended value, the function must get/set/change this particular value instead of the shared value
|
||||
// or another individual/appended value.
|
||||
//
|
||||
// The way appended items are stored is as follows. Let’s say appended items were a regular slice that is initially empty,
|
||||
// and we append an item for object0 and then append another item for object1.
|
||||
// Now we have two items in the slice, but object1 only has an item in index 1. This makes things very confusing and hard to deal with.
|
||||
// If we make appended items a []*Value, things don’t become much better.
|
||||
// It is therefore easiest to make appended items a []*MultiValueItem, which allows each object to have its own values starting at index 0
|
||||
// and not having any “gaps”.
|
||||
//
|
||||
// The Detach function should be called when an object gets garbage collected.
|
||||
// Its purpose is to clean up the slice from individual/appended values of the collected object.
|
||||
// Otherwise the slice will get polluted with values for non-existing objects.
|
||||
//
|
||||
// Example diagram illustrating what happens after copying, updating and detaching:
|
||||
//
|
||||
// Create object o1 with value 10. At this point we only have a shared value.
|
||||
//
|
||||
// ===================
|
||||
// shared | individual
|
||||
// ===================
|
||||
// 10 |
|
||||
//
|
||||
// Copy object o1 to object o2. o2 shares the value with o1, no individual value is created.
|
||||
//
|
||||
// ===================
|
||||
// shared | individual
|
||||
// ===================
|
||||
// 10 |
|
||||
//
|
||||
// Update value of object o2 to 20. An individual value is created.
|
||||
//
|
||||
// ===================
|
||||
// shared | individual
|
||||
// ===================
|
||||
// 10 | 20: [o2]
|
||||
//
|
||||
// Copy object o2 to object o3. The individual value's object list is updated.
|
||||
//
|
||||
// ===================
|
||||
// shared | individual
|
||||
// ===================
|
||||
// 10 | 20: [o2,o3]
|
||||
//
|
||||
// Update value of object o3 to 30. There are two individual values now, one for o2 and one for o3.
|
||||
//
|
||||
// ===================
|
||||
// shared | individual
|
||||
// ===================
|
||||
// 10 | 20: [o2]
|
||||
// | 30: [o3]
|
||||
//
|
||||
// Update value of object o2 to 10. o2 no longer has an individual value
// because it got "reverted" to the original, shared value.
|
||||
//
|
||||
// ===================
|
||||
// shared | individual
|
||||
// ===================
|
||||
// 10 | 30: [o3]
|
||||
//
|
||||
// Detach object o3. Individual value for o3 is removed.
|
||||
//
|
||||
// ===================
|
||||
// shared | individual
|
||||
// ===================
|
||||
// 10 |
|
||||
package mvslice
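// For illustration only (not part of this package): the doc above says Detach
// should be called when an owning object gets garbage collected. One hedged way
// to wire that up from caller code is a finalizer on the owner, e.g.
//
//	o := &myObject{id: uuid.New(), slice: s} // myObject is a hypothetical Identifiable
//	runtime.SetFinalizer(o, func(x *myObject) { s.Detach(x) })
//
// Finalizers are only a sketch of the idea; an explicit Detach call at a known
// lifecycle point is the safer choice.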
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prysmaticlabs/prysm/v4/container/multi-value-slice/interfaces"
|
||||
)
|
||||
|
||||
// MultiValueSlice defines an abstraction over all concrete implementations of the generic Slice.
|
||||
type MultiValueSlice[O interfaces.Identifiable] interface {
|
||||
Len(obj O) int
|
||||
}
|
||||
|
||||
// Value defines a single value along with one or more IDs that share this value.
|
||||
type Value[V any] struct {
|
||||
val V
|
||||
ids []uuid.UUID
|
||||
}
|
||||
|
||||
// MultiValueItem defines a collection of Value items.
|
||||
type MultiValueItem[V any] struct {
|
||||
Values []*Value[V]
|
||||
}
|
||||
|
||||
// Slice is the main component of the multi-value slice data structure. It has two type parameters:
|
||||
// - V comparable - the type of values stored in the slice. The constraint is required
|
||||
// because certain operations (e.g. updating, appending) have to compare values against each other.
|
||||
// - O interfaces.Identifiable - the type of objects sharing the slice. The constraint is required
|
||||
// because we need a way to compare objects against each other in order to know which object's
// values should be accessed.
|
||||
type Slice[V comparable, O interfaces.Identifiable] struct {
|
||||
sharedItems []V
|
||||
individualItems map[uint64]*MultiValueItem[V]
|
||||
appendedItems []*MultiValueItem[V]
|
||||
cachedLengths map[uuid.UUID]int
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// Init initializes the slice with sensible defaults. Input values are assigned to shared items.
|
||||
func (s *Slice[V, O]) Init(items []V) {
|
||||
s.sharedItems = items
|
||||
s.individualItems = map[uint64]*MultiValueItem[V]{}
|
||||
s.appendedItems = []*MultiValueItem[V]{}
|
||||
s.cachedLengths = map[uuid.UUID]int{}
|
||||
}
|
||||
|
||||
// Len returns the number of items for the input object.
|
||||
func (s *Slice[V, O]) Len(obj O) int {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
|
||||
l, ok := s.cachedLengths[obj.Id()]
|
||||
if !ok {
|
||||
return len(s.sharedItems)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// Copy copies items between the source and destination.
|
||||
func (s *Slice[V, O]) Copy(src O, dst O) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
for _, item := range s.individualItems {
|
||||
for _, v := range item.Values {
|
||||
_, found := containsId(v.ids, src.Id())
|
||||
if found {
|
||||
v.ids = append(v.ids, dst.Id())
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, item := range s.appendedItems {
|
||||
found := false
|
||||
for _, v := range item.Values {
|
||||
_, found = containsId(v.ids, src.Id())
|
||||
if found {
|
||||
v.ids = append(v.ids, dst.Id())
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
// This is an optimization. If we didn't find an appended item at index i,
|
||||
// then all larger indices don't have an appended item for the object either.
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
srcLen, ok := s.cachedLengths[src.Id()]
|
||||
if ok {
|
||||
s.cachedLengths[dst.Id()] = srcLen
|
||||
}
|
||||
}
|
||||
|
||||
// Value returns all items for the input object.
|
||||
func (s *Slice[V, O]) Value(obj O) []V {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
|
||||
l, ok := s.cachedLengths[obj.Id()]
|
||||
if ok {
|
||||
result := make([]V, l)
|
||||
s.fillOriginalItems(obj, &result)
|
||||
|
||||
sharedLen := len(s.sharedItems)
|
||||
for i, item := range s.appendedItems {
|
||||
found := false
|
||||
for _, v := range item.Values {
|
||||
_, found = containsId(v.ids, obj.Id())
|
||||
if found {
|
||||
result[sharedLen+i] = v.val
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
// This is an optimization. If we didn't find an appended item at index i,
|
||||
// then all larger indices don't have an appended item for the object either.
|
||||
return result
|
||||
}
|
||||
}
|
||||
return result
|
||||
} else {
|
||||
result := make([]V, len(s.sharedItems))
|
||||
s.fillOriginalItems(obj, &result)
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
// At returns the item at the requested index for the input object.
|
||||
// Appended items' indices are always larger than shared/individual items' indices.
|
||||
// We first check if the index is within the length of shared items.
|
||||
// If it is, then we return an individual value at that index - if it exists - or a shared value otherwise.
|
||||
// If the index is beyond the length of shared values, it is an appended item and that's what gets returned.
|
||||
func (s *Slice[V, O]) At(obj O, index uint64) (V, error) {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
|
||||
if index >= uint64(len(s.sharedItems)+len(s.appendedItems)) {
|
||||
var def V
|
||||
return def, fmt.Errorf("index %d out of bounds", index)
|
||||
}
|
||||
|
||||
isOriginal := index < uint64(len(s.sharedItems))
|
||||
if isOriginal {
|
||||
ind, ok := s.individualItems[index]
|
||||
if !ok {
|
||||
return s.sharedItems[index], nil
|
||||
}
|
||||
for _, v := range ind.Values {
|
||||
for _, id := range v.ids {
|
||||
if id == obj.Id() {
|
||||
return v.val, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return s.sharedItems[index], nil
|
||||
} else {
|
||||
item := s.appendedItems[index-uint64(len(s.sharedItems))]
|
||||
for _, v := range item.Values {
|
||||
for _, id := range v.ids {
|
||||
if id == obj.Id() {
|
||||
return v.val, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
var def V
|
||||
return def, fmt.Errorf("index %d out of bounds", index)
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateAt updates the item at the requested index for the input object to the passed-in value.
|
||||
func (s *Slice[V, O]) UpdateAt(obj O, index uint64, val V) error {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
if index >= uint64(len(s.sharedItems)+len(s.appendedItems)) {
|
||||
return fmt.Errorf("index %d out of bounds", index)
|
||||
}
|
||||
|
||||
isOriginal := index < uint64(len(s.sharedItems))
|
||||
if isOriginal {
|
||||
s.updateOriginalItem(obj, index, val)
|
||||
return nil
|
||||
}
|
||||
return s.updateAppendedItem(obj, index, val)
|
||||
}
|
||||
|
||||
// Append adds a new item to the input object.
|
||||
func (s *Slice[V, O]) Append(obj O, val V) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
if len(s.appendedItems) == 0 {
|
||||
s.appendedItems = append(s.appendedItems, &MultiValueItem[V]{Values: []*Value[V]{{val: val, ids: []uuid.UUID{obj.Id()}}}})
|
||||
s.cachedLengths[obj.Id()] = len(s.sharedItems) + 1
|
||||
return
|
||||
}
|
||||
|
||||
for _, item := range s.appendedItems {
|
||||
found := false
|
||||
for _, v := range item.Values {
|
||||
_, found = containsId(v.ids, obj.Id())
|
||||
if found {
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
newValue := true
|
||||
for _, v := range item.Values {
|
||||
if v.val == val {
|
||||
v.ids = append(v.ids, obj.Id())
|
||||
newValue = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if newValue {
|
||||
item.Values = append(item.Values, &Value[V]{val: val, ids: []uuid.UUID{obj.Id()}})
|
||||
}
|
||||
|
||||
l, ok := s.cachedLengths[obj.Id()]
|
||||
if ok {
|
||||
s.cachedLengths[obj.Id()] = l + 1
|
||||
} else {
|
||||
s.cachedLengths[obj.Id()] = len(s.sharedItems) + 1
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
s.appendedItems = append(s.appendedItems, &MultiValueItem[V]{Values: []*Value[V]{{val: val, ids: []uuid.UUID{obj.Id()}}}})
|
||||
|
||||
s.cachedLengths[obj.Id()] = s.cachedLengths[obj.Id()] + 1
|
||||
}
|
||||
|
||||
// Detach removes the input object from the multi-value slice.
|
||||
// What this means in practice is that we remove all individual and appended values for that object and clear the cached length.
|
||||
func (s *Slice[V, O]) Detach(obj O) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
for i, ind := range s.individualItems {
|
||||
for vi, v := range ind.Values {
|
||||
foundIndex, found := containsId(v.ids, obj.Id())
|
||||
if found {
|
||||
if len(v.ids) == 1 {
|
||||
if len(ind.Values) == 1 {
|
||||
delete(s.individualItems, i)
|
||||
} else {
|
||||
ind.Values = deleteElemFromSlice(ind.Values, vi)
|
||||
}
|
||||
} else {
|
||||
v.ids = deleteElemFromSlice(v.ids, foundIndex)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, item := range s.appendedItems {
|
||||
found := false
|
||||
for vi, v := range item.Values {
|
||||
var foundIndex int
|
||||
foundIndex, found = containsId(v.ids, obj.Id())
|
||||
if found {
|
||||
if len(v.ids) == 1 {
|
||||
item.Values = deleteElemFromSlice(item.Values, vi)
|
||||
} else {
|
||||
v.ids = deleteElemFromSlice(v.ids, foundIndex)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
// This is an optimization. If we didn't find an appended item at index i,
|
||||
// then all larger indices don't have an appended item for the object either.
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
delete(s.cachedLengths, obj.Id())
|
||||
}
|
||||
|
||||
func (s *Slice[V, O]) fillOriginalItems(obj O, items *[]V) {
|
||||
for i, item := range s.sharedItems {
|
||||
ind, ok := s.individualItems[uint64(i)]
|
||||
if !ok {
|
||||
(*items)[i] = item
|
||||
} else {
|
||||
found := false
|
||||
for _, v := range ind.Values {
|
||||
_, found = containsId(v.ids, obj.Id())
|
||||
if found {
|
||||
(*items)[i] = v.val
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
(*items)[i] = item
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Slice[V, O]) updateOriginalItem(obj O, index uint64, val V) {
|
||||
ind, ok := s.individualItems[index]
|
||||
if ok {
|
||||
for mvi, v := range ind.Values {
|
||||
// if we find an existing value, we remove it
|
||||
foundIndex, found := containsId(v.ids, obj.Id())
|
||||
if found {
|
||||
if len(v.ids) == 1 {
|
||||
// There is an improvement to be made here. If len(ind.Values) == 1,
|
||||
// then after removing the item from the slice s.individualItems[i]
|
||||
// will be a useless map entry whose value is an empty slice.
|
||||
ind.Values = deleteElemFromSlice(ind.Values, mvi)
|
||||
} else {
|
||||
v.ids = deleteElemFromSlice(v.ids, foundIndex)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if val == s.sharedItems[index] {
|
||||
return
|
||||
}
|
||||
|
||||
if !ok {
|
||||
s.individualItems[index] = &MultiValueItem[V]{Values: []*Value[V]{{val: val, ids: []uuid.UUID{obj.Id()}}}}
|
||||
} else {
|
||||
newValue := true
|
||||
for _, v := range ind.Values {
|
||||
if v.val == val {
|
||||
v.ids = append(v.ids, obj.Id())
|
||||
newValue = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if newValue {
|
||||
ind.Values = append(ind.Values, &Value[V]{val: val, ids: []uuid.UUID{obj.Id()}})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Slice[V, O]) updateAppendedItem(obj O, index uint64, val V) error {
|
||||
item := s.appendedItems[index-uint64(len(s.sharedItems))]
|
||||
found := false
|
||||
for vi, v := range item.Values {
|
||||
var foundIndex int
|
||||
// if we find an existing value, we remove it
|
||||
foundIndex, found = containsId(v.ids, obj.Id())
|
||||
if found {
|
||||
if len(v.ids) == 1 {
|
||||
item.Values = deleteElemFromSlice(item.Values, vi)
|
||||
} else {
|
||||
v.ids = deleteElemFromSlice(v.ids, foundIndex)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("index %d out of bounds", index)
|
||||
}
|
||||
|
||||
newValue := true
|
||||
for _, v := range item.Values {
|
||||
if v.val == val {
|
||||
v.ids = append(v.ids, obj.Id())
|
||||
newValue = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if newValue {
|
||||
item.Values = append(item.Values, &Value[V]{val: val, ids: []uuid.UUID{obj.Id()}})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func containsId(ids []uuid.UUID, wanted uuid.UUID) (int, bool) {
|
||||
for i, id := range ids {
|
||||
if id == wanted {
|
||||
return i, true
|
||||
}
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// deleteElemFromSlice does not reallocate the slice, but it also does not preserve the order of items.
// This is not a problem here because neither the order of values in a MultiValueItem nor the order of object IDs matters.
|
||||
func deleteElemFromSlice[T any](s []T, i int) []T {
|
||||
s[i] = s[len(s)-1] // Copy last element to index i.
|
||||
s = s[:len(s)-1] // Truncate slice.
|
||||
return s
|
||||
}
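Putting the API above together, a small end-to-end sketch. It assumes the module path from the BUILD file and its google/uuid dependency are importable; the object type and values are arbitrary.

package main

import (
	"fmt"

	"github.com/google/uuid"
	mvslice "github.com/prysmaticlabs/prysm/v4/container/multi-value-slice"
)

// obj is a minimal Identifiable for this sketch.
type obj struct{ id uuid.UUID }

func (o *obj) Id() uuid.UUID      { return o.id }
func (o *obj) SetId(id uuid.UUID) { o.id = id }

func main() {
	s := &mvslice.Slice[int, *obj]{}
	s.Init([]int{10, 20, 30})

	src := &obj{id: uuid.New()}
	dst := &obj{id: uuid.New()}
	s.Copy(src, dst)

	// dst gets an individual value at index 0; src still sees the shared 10.
	_ = s.UpdateAt(dst, 0, 99)
	a, _ := s.At(src, 0)
	b, _ := s.At(dst, 0)
	fmt.Println(a, b, s.Len(dst)) // 10 99 3

	// Appending only grows dst's view of the slice.
	s.Append(dst, 40)
	fmt.Println(s.Value(dst)) // [99 20 30 40]
	fmt.Println(s.Len(src))   // 3

	// Clean up dst's individual and appended values when dst goes away.
	s.Detach(dst)
}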
|
||||
container/multi-value-slice/multi_value_slice_test.go (new file, 664 lines)
@@ -0,0 +1,664 @@
|
||||
package mvslice
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
|
||||
var (
|
||||
id1 = uuid.New()
|
||||
id2 = uuid.New()
|
||||
id999 = uuid.New()
|
||||
)
|
||||
|
||||
type testObject struct {
|
||||
id uuid.UUID
|
||||
slice *Slice[int, *testObject]
|
||||
}
|
||||
|
||||
func (o *testObject) Id() uuid.UUID {
|
||||
return o.id
|
||||
}
|
||||
|
||||
func (o *testObject) SetId(id uuid.UUID) {
|
||||
o.id = id
|
||||
}
|
||||
|
||||
func TestLen(t *testing.T) {
|
||||
s := &Slice[int, *testObject]{}
|
||||
s.Init([]int{1, 2, 3})
|
||||
id := uuid.New()
|
||||
s.cachedLengths[id] = 123
|
||||
t.Run("cached", func(t *testing.T) {
|
||||
assert.Equal(t, 123, s.Len(&testObject{id: id}))
|
||||
})
|
||||
t.Run("not cached", func(t *testing.T) {
|
||||
assert.Equal(t, 3, s.Len(&testObject{id: uuid.New()}))
|
||||
})
|
||||
}
|
||||
|
||||
func TestCopy(t *testing.T) {
|
||||
// What we want to check:
|
||||
// - shared value is copied
|
||||
// - when the source object has an individual value, it is copied
|
||||
// - when the source object does not have an individual value, the shared value is copied
|
||||
// - when the source object has an appended value, it is copied
|
||||
// - when the source object does not have an appended value, nothing is copied
|
||||
// - length of destination object is cached
|
||||
|
||||
s := setup()
|
||||
src := &testObject{id: id1, slice: s}
|
||||
dst := &testObject{id: id999, slice: s}
|
||||
|
||||
s.Copy(src, dst)
|
||||
|
||||
assert.Equal(t, (*MultiValueItem[int])(nil), dst.slice.individualItems[0])
|
||||
assertIndividualFound(t, s, dst.id, 1, 1)
|
||||
assertIndividualFound(t, s, dst.id, 2, 3)
|
||||
assertIndividualFound(t, s, dst.id, 3, 1)
|
||||
assertIndividualNotFound(t, s, dst.id, 4)
|
||||
assertAppendedFound(t, s, dst.id, 0, 1)
|
||||
assertAppendedFound(t, s, dst.id, 1, 3)
|
||||
assertAppendedNotFound(t, s, dst.id, 2)
|
||||
l, ok := s.cachedLengths[id999]
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, 7, l)
|
||||
}
|
||||
|
||||
func TestValue(t *testing.T) {
|
||||
// What we want to check:
|
||||
// - correct values are returned for first object
|
||||
// - correct values are returned for second object
|
||||
// - correct values are returned for an object without appended items
|
||||
|
||||
s := setup()
|
||||
first := &testObject{id: id1, slice: s}
|
||||
second := &testObject{id: id2, slice: s}
|
||||
|
||||
v := s.Value(first)
|
||||
|
||||
require.Equal(t, 7, len(v))
|
||||
assert.Equal(t, 123, v[0])
|
||||
assert.Equal(t, 1, v[1])
|
||||
assert.Equal(t, 3, v[2])
|
||||
assert.Equal(t, 1, v[3])
|
||||
assert.Equal(t, 123, v[4])
|
||||
assert.Equal(t, 1, v[5])
|
||||
assert.Equal(t, 3, v[6])
|
||||
|
||||
v = s.Value(second)
|
||||
|
||||
require.Equal(t, 8, len(v))
|
||||
assert.Equal(t, 123, v[0])
|
||||
assert.Equal(t, 2, v[1])
|
||||
assert.Equal(t, 3, v[2])
|
||||
assert.Equal(t, 123, v[3])
|
||||
assert.Equal(t, 2, v[4])
|
||||
assert.Equal(t, 2, v[5])
|
||||
assert.Equal(t, 3, v[6])
|
||||
assert.Equal(t, 2, v[7])
|
||||
|
||||
s = &Slice[int, *testObject]{}
|
||||
s.Init([]int{1, 2, 3})
|
||||
id := uuid.New()
|
||||
|
||||
v = s.Value(&testObject{id: id})
|
||||
|
||||
require.Equal(t, 3, len(v))
|
||||
assert.Equal(t, 1, v[0])
|
||||
assert.Equal(t, 2, v[1])
|
||||
assert.Equal(t, 3, v[2])
|
||||
}
|
||||
|
||||
func TestAt(t *testing.T) {
|
||||
// What we want to check:
|
||||
// - correct values are returned for first object
|
||||
// - correct values are returned for second object
|
||||
// - ERROR when index too large in general
|
||||
// - ERROR when index not too large in general, but too large for an object
|
||||
|
||||
s := setup()
|
||||
first := &testObject{id: id1, slice: s}
|
||||
second := &testObject{id: id2, slice: s}
|
||||
|
||||
v, err := s.At(first, 0)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 123, v)
|
||||
v, err = s.At(first, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, v)
|
||||
v, err = s.At(first, 2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 3, v)
|
||||
v, err = s.At(first, 3)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, v)
|
||||
v, err = s.At(first, 4)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 123, v)
|
||||
v, err = s.At(first, 5)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, v)
|
||||
v, err = s.At(first, 6)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 3, v)
|
||||
_, err = s.At(first, 7)
|
||||
assert.ErrorContains(t, "index 7 out of bounds", err)
|
||||
|
||||
v, err = s.At(second, 0)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 123, v)
|
||||
v, err = s.At(second, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, v)
|
||||
v, err = s.At(second, 2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 3, v)
|
||||
v, err = s.At(second, 3)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 123, v)
|
||||
v, err = s.At(second, 4)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, v)
|
||||
v, err = s.At(second, 5)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, v)
|
||||
v, err = s.At(second, 6)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 3, v)
|
||||
v, err = s.At(second, 7)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, v)
|
||||
_, err = s.At(second, 8)
|
||||
assert.ErrorContains(t, "index 8 out of bounds", err)
|
||||
}
|
||||
|
||||
func TestUpdateAt(t *testing.T) {
|
||||
// What we want to check:
|
||||
// - shared value is updated only for the updated object, creating a new individual value (shared value remains the same)
|
||||
// - individual value (different for both objects) is updated to a third value
|
||||
// - individual value (different for both objects) is updated to the other object's value
|
||||
// - individual value (equal for both objects) is updated
|
||||
// - individual value existing only for the updated object is updated
|
||||
// - individual value existing only for the other-object appends an item to the individual value
|
||||
// - individual value updated to the original shared value removes that individual value
|
||||
// - appended value (different for both objects) is updated to a third value
|
||||
// - appended value (different for both objects) is updated to the other object's value
|
||||
// - appended value (equal for both objects) is updated
|
||||
// - appended value existing for one object is updated
|
||||
// - ERROR when index too large in general
|
||||
// - ERROR when index not too large in general, but too large for an object
|
||||
|
||||
s := setup()
|
||||
first := &testObject{id: id1, slice: s}
|
||||
second := &testObject{id: id2, slice: s}
|
||||
|
||||
require.NoError(t, s.UpdateAt(first, 0, 999))
|
||||
assert.Equal(t, 123, s.sharedItems[0])
|
||||
assertIndividualFound(t, s, first.id, 0, 999)
|
||||
assertIndividualNotFound(t, s, second.id, 0)
|
||||
|
||||
require.NoError(t, s.UpdateAt(first, 1, 999))
|
||||
assertIndividualFound(t, s, first.id, 1, 999)
|
||||
assertIndividualFound(t, s, second.id, 1, 2)
|
||||
|
||||
require.NoError(t, s.UpdateAt(first, 1, 2))
|
||||
assertIndividualFound(t, s, first.id, 1, 2)
|
||||
assertIndividualFound(t, s, second.id, 1, 2)
|
||||
|
||||
require.NoError(t, s.UpdateAt(first, 2, 999))
|
||||
assertIndividualFound(t, s, first.id, 2, 999)
|
||||
assertIndividualFound(t, s, second.id, 2, 3)
|
||||
|
||||
require.NoError(t, s.UpdateAt(first, 3, 999))
|
||||
assertIndividualFound(t, s, first.id, 3, 999)
|
||||
assertIndividualNotFound(t, s, second.id, 3)
|
||||
|
||||
require.NoError(t, s.UpdateAt(first, 4, 999))
|
||||
assertIndividualFound(t, s, first.id, 4, 999)
|
||||
assertIndividualFound(t, s, second.id, 4, 2)
|
||||
|
||||
require.NoError(t, s.UpdateAt(first, 4, 123))
|
||||
assertIndividualNotFound(t, s, first.id, 4)
|
||||
assertIndividualFound(t, s, second.id, 4, 2)
|
||||
|
||||
require.NoError(t, s.UpdateAt(first, 5, 999))
|
||||
assertAppendedFound(t, s, first.id, 0, 999)
|
||||
assertAppendedFound(t, s, second.id, 0, 2)
|
||||
|
||||
require.NoError(t, s.UpdateAt(first, 5, 2))
|
||||
assertAppendedFound(t, s, first.id, 0, 2)
|
||||
assertAppendedFound(t, s, second.id, 0, 2)
|
||||
|
||||
require.NoError(t, s.UpdateAt(first, 6, 999))
|
||||
assertAppendedFound(t, s, first.id, 1, 999)
|
||||
assertAppendedFound(t, s, second.id, 1, 3)
|
||||
|
||||
// we update the second object because there are no more appended items for the first object
|
||||
require.NoError(t, s.UpdateAt(second, 7, 999))
|
||||
assertAppendedNotFound(t, s, first.id, 2)
|
||||
assertAppendedFound(t, s, second.id, 2, 999)
|
||||
|
||||
assert.ErrorContains(t, "index 7 out of bounds", s.UpdateAt(first, 7, 999))
|
||||
assert.ErrorContains(t, "index 8 out of bounds", s.UpdateAt(second, 8, 999))
|
||||
}
|
||||
|
||||
func TestAppend(t *testing.T) {
|
||||
// What we want to check:
|
||||
// - appending first item ever to the slice
|
||||
// - appending an item to an object when there is no corresponding item for the other object
|
||||
// - appending an item to an object when there is a corresponding item with same value for the other object
|
||||
// - appending an item to an object when there is a corresponding item with different value for the other object
|
||||
// - we also want to check that cached length is properly updated after every append
|
||||
|
||||
// we want to start with the simplest slice possible
|
||||
s := &Slice[int, *testObject]{}
|
||||
s.Init([]int{0})
|
||||
first := &testObject{id: id1, slice: s}
|
||||
second := &testObject{id: id2, slice: s}
|
||||
|
||||
// append first value ever
|
||||
s.Append(first, 1)
|
||||
require.Equal(t, 1, len(s.appendedItems))
|
||||
assertAppendedFound(t, s, first.id, 0, 1)
|
||||
assertAppendedNotFound(t, s, second.id, 0)
|
||||
l, ok := s.cachedLengths[first.id]
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, 2, l)
|
||||
_, ok = s.cachedLengths[second.id]
|
||||
assert.Equal(t, false, ok)
|
||||
|
||||
// append one more value to the first object, so that we can test two append scenarios for the second object
|
||||
s.Append(first, 1)
|
||||
|
||||
// append the first value to the second object, equal to the value for the first object
|
||||
s.Append(second, 1)
|
||||
require.Equal(t, 2, len(s.appendedItems))
|
||||
assertAppendedFound(t, s, first.id, 0, 1)
|
||||
assertAppendedFound(t, s, second.id, 0, 1)
|
||||
l, ok = s.cachedLengths[first.id]
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, 3, l)
|
||||
l, ok = s.cachedLengths[second.id]
|
||||
assert.Equal(t, true, ok)
|
||||
assert.Equal(t, 2, l)
|
||||
|
||||
// append the first value to the second object, different than the value for the first object
|
||||
s.Append(second, 2)
|
||||
require.Equal(t, 2, len(s.appendedItems))
|
||||
assertAppendedFound(t, s, first.id, 1, 1)
|
||||
assertAppendedFound(t, s, second.id, 1, 2)
|
||||
l, ok = s.cachedLengths[first.id]
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, 3, l)
|
||||
l, ok = s.cachedLengths[second.id]
|
||||
assert.Equal(t, true, ok)
|
||||
assert.Equal(t, 3, l)
|
||||
}
|
||||
|
||||
func TestDetach(t *testing.T) {
|
||||
// What we want to check:
|
||||
// - no individual or appended items left after detaching an object
|
||||
// - length removed from cache
|
||||
|
||||
s := setup()
|
||||
obj := &testObject{id: id1, slice: s}
|
||||
|
||||
s.Detach(obj)
|
||||
|
||||
for _, item := range s.individualItems {
|
||||
found := false
|
||||
for _, v := range item.Values {
|
||||
for _, o := range v.ids {
|
||||
if o == obj.id {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
}
|
||||
assert.Equal(t, false, found)
|
||||
}
|
||||
for _, item := range s.appendedItems {
|
||||
found := false
|
||||
for _, v := range item.Values {
|
||||
for _, o := range v.ids {
|
||||
if o == obj.id {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
}
|
||||
assert.Equal(t, false, found)
|
||||
}
|
||||
_, ok := s.cachedLengths[obj.id]
|
||||
assert.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
// Share the slice between 2 objects.
// Index 0: Shared value
// Index 1: Different individual value
// Index 2: Same individual value
// Index 3: Individual value ONLY for the first object
// Index 4: Individual value ONLY for the second object
// Index 5: Different appended value
// Index 6: Same appended value
// Index 7: Appended value ONLY for the second object
func setup() *Slice[int, *testObject] {
	s := &Slice[int, *testObject]{}
	s.Init([]int{123, 123, 123, 123, 123})
	s.individualItems[1] = &MultiValueItem[int]{
		Values: []*Value[int]{
			{
				val: 1,
				ids: []uuid.UUID{id1},
			},
			{
				val: 2,
				ids: []uuid.UUID{id2},
			},
		},
	}
	s.individualItems[2] = &MultiValueItem[int]{
		Values: []*Value[int]{
			{
				val: 3,
				ids: []uuid.UUID{id1, id2},
			},
		},
	}
	s.individualItems[3] = &MultiValueItem[int]{
		Values: []*Value[int]{
			{
				val: 1,
				ids: []uuid.UUID{id1},
			},
		},
	}
	s.individualItems[4] = &MultiValueItem[int]{
		Values: []*Value[int]{
			{
				val: 2,
				ids: []uuid.UUID{id2},
			},
		},
	}
	s.appendedItems = []*MultiValueItem[int]{
		{
			Values: []*Value[int]{
				{
					val: 1,
					ids: []uuid.UUID{id1},
				},
				{
					val: 2,
					ids: []uuid.UUID{id2},
				},
			},
		},
		{
			Values: []*Value[int]{
				{
					val: 3,
					ids: []uuid.UUID{id1, id2},
				},
			},
		},
		{
			Values: []*Value[int]{
				{
					val: 2,
					ids: []uuid.UUID{id2},
				},
			},
		},
	}
	s.cachedLengths[id1] = 7
	s.cachedLengths[id2] = 8

	return s
}

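For orientation, the fixture above can be read as two materialized views. The listing below is an illustration only, inferred from the index comments and the cached lengths, and it assumes that Value() overlays each object's individual items on the shared base and then adds that object's appended items:

// Illustration (not part of the test file): expected materialized views for the
// fixture built by setup(), under the assumption described above.
//
//	s.Value(first)  -> [123, 1, 3, 1, 123, 1, 3]      // cached length 7 for id1
//	s.Value(second) -> [123, 2, 3, 123, 2, 2, 3, 2]   // cached length 8 for id2
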
func assertIndividualFound(t *testing.T, slice *Slice[int, *testObject], id uuid.UUID, itemIndex uint64, expected int) {
	found := false
	for _, v := range slice.individualItems[itemIndex].Values {
		for _, o := range v.ids {
			if o == id {
				found = true
				assert.Equal(t, expected, v.val)
			}
		}
	}
	assert.Equal(t, true, found)
}

func assertIndividualNotFound(t *testing.T, slice *Slice[int, *testObject], id uuid.UUID, itemIndex uint64) {
	found := false
	for _, v := range slice.individualItems[itemIndex].Values {
		for _, o := range v.ids {
			if o == id {
				found = true
			}
		}
	}
	assert.Equal(t, false, found)
}

func assertAppendedFound(t *testing.T, slice *Slice[int, *testObject], id uuid.UUID, itemIndex uint64, expected int) {
	found := false
	for _, v := range slice.appendedItems[itemIndex].Values {
		for _, o := range v.ids {
			if o == id {
				found = true
				assert.Equal(t, expected, v.val)
			}
		}
	}
	assert.Equal(t, true, found)
}

func assertAppendedNotFound(t *testing.T, slice *Slice[int, *testObject], id uuid.UUID, itemIndex uint64) {
	found := false
	for _, v := range slice.appendedItems[itemIndex].Values {
		for _, o := range v.ids {
			if o == id {
				found = true
			}
		}
	}
	assert.Equal(t, false, found)
}

func BenchmarkValue(b *testing.B) {
	const _100k = 100000
	const _1m = 1000000
	const _10m = 10000000

	b.Run("100,000 shared items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _100k))
		for i := 0; i < b.N; i++ {
			s.Value(&testObject{})
		}
	})
	b.Run("100,000 equal individual items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _100k))
		s.individualItems[0] = &MultiValueItem[int]{Values: []*Value[int]{{val: 999, ids: []uuid.UUID{}}}}
		objs := make([]*testObject, _100k)
		for i := 0; i < len(objs); i++ {
			id := uuid.New()
			objs[i] = &testObject{id: id, slice: s}
			s.individualItems[0].Values[0].ids = append(s.individualItems[0].Values[0].ids, id)
		}
		for i := 0; i < b.N; i++ {
			s.Value(objs[rand.Intn(_100k)])
		}
	})
	b.Run("100,000 different individual items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _100k))
		objs := make([]*testObject, _100k)
		for i := 0; i < len(objs); i++ {
			id := uuid.New()
			objs[i] = &testObject{id: id, slice: s}
			s.individualItems[uint64(i)] = &MultiValueItem[int]{Values: []*Value[int]{{val: i, ids: []uuid.UUID{id}}}}
		}
		for i := 0; i < b.N; i++ {
			s.Value(objs[rand.Intn(_100k)])
		}
	})
	b.Run("100,000 shared items and 100,000 equal appended items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _100k))
		s.appendedItems = []*MultiValueItem[int]{{Values: []*Value[int]{{val: 999, ids: []uuid.UUID{}}}}}
		objs := make([]*testObject, _100k)
		for i := 0; i < len(objs); i++ {
			id := uuid.New()
			objs[i] = &testObject{id: id, slice: s}
			s.appendedItems[0].Values[0].ids = append(s.appendedItems[0].Values[0].ids, id)
		}
		for i := 0; i < b.N; i++ {
			s.Value(objs[rand.Intn(_100k)])
		}
	})
	b.Run("100,000 shared items and 100,000 different appended items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _100k))
		s.appendedItems = []*MultiValueItem[int]{}
		objs := make([]*testObject, _100k)
		for i := 0; i < len(objs); i++ {
			id := uuid.New()
			objs[i] = &testObject{id: id, slice: s}
			s.appendedItems = append(s.appendedItems, &MultiValueItem[int]{Values: []*Value[int]{{val: i, ids: []uuid.UUID{id}}}})
		}
		for i := 0; i < b.N; i++ {
			s.Value(objs[rand.Intn(_100k)])
		}
	})
	b.Run("1,000,000 shared items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _1m))
		for i := 0; i < b.N; i++ {
			s.Value(&testObject{})
		}
	})
	b.Run("1,000,000 equal individual items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _1m))
		s.individualItems[0] = &MultiValueItem[int]{Values: []*Value[int]{{val: 999, ids: []uuid.UUID{}}}}
		objs := make([]*testObject, _1m)
		for i := 0; i < len(objs); i++ {
			id := uuid.New()
			objs[i] = &testObject{id: id, slice: s}
			s.individualItems[0].Values[0].ids = append(s.individualItems[0].Values[0].ids, id)
		}
		for i := 0; i < b.N; i++ {
			s.Value(objs[rand.Intn(_1m)])
		}
	})
	b.Run("1,000,000 different individual items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _1m))
		objs := make([]*testObject, _1m)
		for i := 0; i < len(objs); i++ {
			id := uuid.New()
			objs[i] = &testObject{id: id, slice: s}
			s.individualItems[uint64(i)] = &MultiValueItem[int]{Values: []*Value[int]{{val: i, ids: []uuid.UUID{id}}}}
		}
		for i := 0; i < b.N; i++ {
			s.Value(objs[rand.Intn(_1m)])
		}
	})
	b.Run("1,000,000 shared items and 1,000,000 equal appended items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _1m))
		s.appendedItems = []*MultiValueItem[int]{{Values: []*Value[int]{{val: 999, ids: []uuid.UUID{}}}}}
		objs := make([]*testObject, _1m)
		for i := 0; i < len(objs); i++ {
			id := uuid.New()
			objs[i] = &testObject{id: id, slice: s}
			s.appendedItems[0].Values[0].ids = append(s.appendedItems[0].Values[0].ids, id)
		}
		for i := 0; i < b.N; i++ {
			s.Value(objs[rand.Intn(_1m)])
		}
	})
	b.Run("1,000,000 shared items and 1,000,000 different appended items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _1m))
		s.appendedItems = []*MultiValueItem[int]{}
		objs := make([]*testObject, _1m)
		for i := 0; i < len(objs); i++ {
			id := uuid.New()
			objs[i] = &testObject{id: id, slice: s}
			s.appendedItems = append(s.appendedItems, &MultiValueItem[int]{Values: []*Value[int]{{val: i, ids: []uuid.UUID{id}}}})
		}
		for i := 0; i < b.N; i++ {
			s.Value(objs[rand.Intn(_1m)])
		}
	})
	b.Run("10,000,000 shared items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _10m))
		for i := 0; i < b.N; i++ {
			s.Value(&testObject{})
		}
	})
	b.Run("10,000,000 equal individual items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _10m))
		s.individualItems[0] = &MultiValueItem[int]{Values: []*Value[int]{{val: 999, ids: []uuid.UUID{}}}}
		objs := make([]*testObject, _10m)
		for i := 0; i < len(objs); i++ {
			id := uuid.New()
			objs[i] = &testObject{id: id, slice: s}
			s.individualItems[0].Values[0].ids = append(s.individualItems[0].Values[0].ids, id)
		}
		for i := 0; i < b.N; i++ {
			s.Value(objs[rand.Intn(_10m)])
		}
	})
	b.Run("10,000,000 different individual items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _10m))
		objs := make([]*testObject, _10m)
		for i := 0; i < len(objs); i++ {
			id := uuid.New()
			objs[i] = &testObject{id: id, slice: s}
			s.individualItems[uint64(i)] = &MultiValueItem[int]{Values: []*Value[int]{{val: i, ids: []uuid.UUID{id}}}}
		}
		for i := 0; i < b.N; i++ {
			s.Value(objs[rand.Intn(_10m)])
		}
	})
	b.Run("10,000,000 shared items and 10,000,000 equal appended items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _10m))
		s.appendedItems = []*MultiValueItem[int]{{Values: []*Value[int]{{val: 999, ids: []uuid.UUID{}}}}}
		objs := make([]*testObject, _10m)
		for i := 0; i < len(objs); i++ {
			id := uuid.New()
			objs[i] = &testObject{id: id, slice: s}
			s.appendedItems[0].Values[0].ids = append(s.appendedItems[0].Values[0].ids, id)
		}
		for i := 0; i < b.N; i++ {
			s.Value(objs[rand.Intn(_10m)])
		}
	})
	b.Run("10,000,000 shared items and 10,000,000 different appended items", func(b *testing.B) {
		s := &Slice[int, *testObject]{}
		s.Init(make([]int, _10m))
		s.appendedItems = []*MultiValueItem[int]{}
		objs := make([]*testObject, _10m)
		for i := 0; i < len(objs); i++ {
			id := uuid.New()
			objs[i] = &testObject{id: id, slice: s}
			s.appendedItems = append(s.appendedItems, &MultiValueItem[int]{Values: []*Value[int]{{val: i, ids: []uuid.UUID{id}}}})
		}
		for i := 0; i < b.N; i++ {
			s.Value(objs[rand.Intn(_10m)])
		}
	})
}
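Taken together, the tests and benchmarks above exercise a small surface: Init, Append, UpdateAt, Detach and Value. A rough usage sketch follows; it only assumes the calls visible in these tests (testObject, id1/id2 and the assert helpers are test fixtures, not part of the production API), so treat it as an illustration rather than the package's documented contract.

// Sketch based on the calls exercised above; the exact Value() semantics and
// error behavior are assumptions, not the package's documentation.
func sketchUsage() {
	s := &Slice[int, *testObject]{}
	s.Init([]int{0, 0, 0}) // shared backing values for every attached object

	obj := &testObject{id: uuid.New(), slice: s}

	s.Append(obj, 42)                             // per-object appended value
	if err := s.UpdateAt(obj, 1, 7); err != nil { // per-object override at index 1
		panic(err)
	}
	s.Value(obj)  // materialized view for obj, e.g. [0, 7, 0, 42]
	s.Detach(obj) // drops obj's individual/appended entries and its cached length
}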
@@ -16,15 +16,90 @@ go_library(
    ],
    importpath = "github.com/prysmaticlabs/prysm/v4/crypto/bls/blst",
    visibility = ["//visibility:public"],
    deps = [
        "//cache/nonblocking:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//crypto/rand:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_supranational_blst//:go_default_library",
    ],
    deps = select({
        "@io_bazel_rules_go//go/platform:android_amd64": [
            "//cache/nonblocking:go_default_library",
            "//config/fieldparams:go_default_library",
            "//config/params:go_default_library",
            "//crypto/bls/common:go_default_library",
            "//crypto/rand:go_default_library",
            "@com_github_pkg_errors//:go_default_library",
            "@com_github_supranational_blst//:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:android_arm64": [
            "//cache/nonblocking:go_default_library",
            "//config/fieldparams:go_default_library",
            "//config/params:go_default_library",
            "//crypto/bls/common:go_default_library",
            "//crypto/rand:go_default_library",
            "@com_github_pkg_errors//:go_default_library",
            "@com_github_supranational_blst//:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:darwin_amd64": [
            "//cache/nonblocking:go_default_library",
            "//config/fieldparams:go_default_library",
            "//config/params:go_default_library",
            "//crypto/bls/common:go_default_library",
            "//crypto/rand:go_default_library",
            "@com_github_pkg_errors//:go_default_library",
            "@com_github_supranational_blst//:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:darwin_arm64": [
            "//cache/nonblocking:go_default_library",
            "//config/fieldparams:go_default_library",
            "//config/params:go_default_library",
            "//crypto/bls/common:go_default_library",
            "//crypto/rand:go_default_library",
            "@com_github_pkg_errors//:go_default_library",
            "@com_github_supranational_blst//:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:ios_amd64": [
            "//cache/nonblocking:go_default_library",
            "//config/fieldparams:go_default_library",
            "//config/params:go_default_library",
            "//crypto/bls/common:go_default_library",
            "//crypto/rand:go_default_library",
            "@com_github_pkg_errors//:go_default_library",
            "@com_github_supranational_blst//:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:ios_arm64": [
            "//cache/nonblocking:go_default_library",
            "//config/fieldparams:go_default_library",
            "//config/params:go_default_library",
            "//crypto/bls/common:go_default_library",
            "//crypto/rand:go_default_library",
            "@com_github_pkg_errors//:go_default_library",
            "@com_github_supranational_blst//:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:linux_amd64": [
            "//cache/nonblocking:go_default_library",
            "//config/fieldparams:go_default_library",
            "//config/params:go_default_library",
            "//crypto/bls/common:go_default_library",
            "//crypto/rand:go_default_library",
            "@com_github_pkg_errors//:go_default_library",
            "@com_github_supranational_blst//:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:linux_arm64": [
            "//cache/nonblocking:go_default_library",
            "//config/fieldparams:go_default_library",
            "//config/params:go_default_library",
            "//crypto/bls/common:go_default_library",
            "//crypto/rand:go_default_library",
            "@com_github_pkg_errors//:go_default_library",
            "@com_github_supranational_blst//:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:windows_amd64": [
            "//cache/nonblocking:go_default_library",
            "//config/fieldparams:go_default_library",
            "//config/params:go_default_library",
            "//crypto/bls/common:go_default_library",
            "//crypto/rand:go_default_library",
            "@com_github_pkg_errors//:go_default_library",
            "@com_github_supranational_blst//:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

go_test(
@@ -37,10 +112,61 @@ go_test(
        "test_helper_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//crypto/bls/common:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
    deps = select({
        "@io_bazel_rules_go//go/platform:android_amd64": [
            "//crypto/bls/common:go_default_library",
            "//encoding/bytesutil:go_default_library",
            "//testing/assert:go_default_library",
            "//testing/require:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:android_arm64": [
            "//crypto/bls/common:go_default_library",
            "//encoding/bytesutil:go_default_library",
            "//testing/assert:go_default_library",
            "//testing/require:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:darwin_amd64": [
            "//crypto/bls/common:go_default_library",
            "//encoding/bytesutil:go_default_library",
            "//testing/assert:go_default_library",
            "//testing/require:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:darwin_arm64": [
            "//crypto/bls/common:go_default_library",
            "//encoding/bytesutil:go_default_library",
            "//testing/assert:go_default_library",
            "//testing/require:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:ios_amd64": [
            "//crypto/bls/common:go_default_library",
            "//encoding/bytesutil:go_default_library",
            "//testing/assert:go_default_library",
            "//testing/require:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:ios_arm64": [
            "//crypto/bls/common:go_default_library",
            "//encoding/bytesutil:go_default_library",
            "//testing/assert:go_default_library",
            "//testing/require:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:linux_amd64": [
            "//crypto/bls/common:go_default_library",
            "//encoding/bytesutil:go_default_library",
            "//testing/assert:go_default_library",
            "//testing/require:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:linux_arm64": [
            "//crypto/bls/common:go_default_library",
            "//encoding/bytesutil:go_default_library",
            "//testing/assert:go_default_library",
            "//testing/require:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:windows_amd64": [
            "//crypto/bls/common:go_default_library",
            "//encoding/bytesutil:go_default_library",
            "//testing/assert:go_default_library",
            "//testing/require:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

deps.bzl
@@ -1,6 +1,6 @@
load("@prysm//tools/go:def.bzl", "go_repository", "maybe") # gazelle:keep
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") # gazelle:keep
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") # gazelle:keep
load("@prysm//tools/go:def.bzl", "go_repository", "maybe") # gazelle:keep

# Prysm's third party / external dependencies.
#
@@ -368,6 +368,13 @@ def prysm_deps():
        sum = "h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=",
        version = "v0.1.0",
    )
    go_repository(
        name = "com_github_bits_and_blooms_bitset",
        importpath = "github.com/bits-and-blooms/bitset",
        sum = "h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=",
        version = "v1.7.0",
    )

    go_repository(
        name = "com_github_bketelsen_crypt",
        importpath = "github.com/bketelsen/crypt",
@@ -622,8 +629,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_consensys_gnark_crypto",
        importpath = "github.com/consensys/gnark-crypto",
        sum = "h1:llSLg4o9EgH3SrXky+Q5BqEYqV76NGKo07K5Ps2pIKo=",
        version = "v0.9.1-0.20230105202408-1a7a29904a7c",
        sum = "h1:zRh22SR7o4K35SoNqouS9J/TKHTyU2QWaj5ldehyXtA=",
        version = "v0.10.0",
    )

    go_repository(
@@ -694,8 +701,14 @@ def prysm_deps():
    go_repository(
        name = "com_github_crate_crypto_go_ipa",
        importpath = "github.com/crate-crypto/go-ipa",
        sum = "h1:6IrxszG5G+O7zhtkWxq6+unVvnrm1fqV2Pe+T95DUzw=",
        version = "v0.0.0-20220523130400-f11357ae11c7",
        sum = "h1:DuBDHVjgGMPki7bAyh91+3cF1Vh34sAEdH8JQgbc2R0=",
        version = "v0.0.0-20230601170251-1830d0757c80",
    )
    go_repository(
        name = "com_github_crate_crypto_go_kzg_4844",
        importpath = "github.com/crate-crypto/go-kzg-4844",
        sum = "h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A=",
        version = "v0.3.0",
    )

    go_repository(
@@ -865,8 +878,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_dop251_goja",
        importpath = "github.com/dop251/goja",
        sum = "h1:kgvzE5wLsLa7XKfV85VZl40QXaMCaeFtHpPwJ8fhotY=",
        version = "v0.0.0-20230122112309-96b1610dd4f7",
        sum = "h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE=",
        version = "v0.0.0-20230605162241-28ee0ee714f3",
    )
    go_repository(
        name = "com_github_dop251_goja_nodejs",
@@ -977,6 +990,17 @@ def prysm_deps():
        sum = "h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=",
        version = "v1.3.3",
    )
    go_repository(
        name = "com_github_ethereum_c_kzg_4844",
        build_directives = [
            "gazelle:resolve go github.com/supranational/blst/bindings/go @com_github_supranational_blst//:go_default_library",
        ],
        importpath = "github.com/ethereum/c-kzg-4844",
        patch_args = ["-p1"],
        patches = ["//third_party:com_github_ethereum_c_kzg_4844.patch"],
        sum = "h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg=",
        version = "v0.3.1",
    )

    go_repository(
        name = "com_github_ethereum_go_ethereum",
@@ -988,8 +1012,8 @@ def prysm_deps():
        patches = [
            "//third_party:com_github_ethereum_go_ethereum_secp256k1.patch",
        ],
        sum = "h1:uuBkYUJW9aY5JYi3+sqLHz+XWyo5fmn/ab9XcbtVDTU=",
        version = "v1.11.3",
        sum = "h1:eGHJ4ij7oyVqUQn48LBz3B7pvQ8sV0wGJiIE6gDq/6Y=",
        version = "v1.12.2",
    )

    go_repository(
@@ -1027,8 +1051,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_fjl_gencodec",
        importpath = "github.com/fjl/gencodec",
        sum = "h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0=",
        version = "v0.0.0-20220412091415-8bb9e558978c",
        sum = "h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY=",
        version = "v0.0.0-20230517082657-f9840df7b83e",
    )

    go_repository(
@@ -1138,8 +1162,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_gballet_go_verkle",
        importpath = "github.com/gballet/go-verkle",
        sum = "h1:AB7YjNrzlVHsYz06zCULVV2zYCEft82P86dSmtwxKL0=",
        version = "v0.0.0-20220902153445-097bd83b7732",
        sum = "h1:vMT47RYsrftsHSTQhqXwC3BYflo38OLC3Y4LtXtLyU0=",
        version = "v0.0.0-20230607174250-df487255f46b",
    )

    go_repository(
@@ -1151,8 +1175,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_gdamore_tcell_v2",
        importpath = "github.com/gdamore/tcell/v2",
        sum = "h1:b9XQrT6QGbgI7JvZOJXFNczOQeIYbo8BfeSMzt2sAV0=",
        version = "v2.5.3",
        sum = "h1:OKbluoP9VYmJwZwq/iLb4BxwKcwGthaa1YNBJIyCySg=",
        version = "v2.6.0",
    )

    go_repository(
@@ -1498,8 +1522,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_golang_snappy",
        importpath = "github.com/golang/snappy",
        sum = "h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=",
        version = "v0.0.4",
        sum = "h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=",
        version = "v0.0.5-0.20220116011046-fa5810519dcb",
    )
    go_repository(
        name = "com_github_golangci_lint_1",
@@ -1583,6 +1607,12 @@ def prysm_deps():
        sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=",
        version = "v0.1.0",
    )
    go_repository(
        name = "com_github_google_subcommands",
        importpath = "github.com/google/subcommands",
        sum = "h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE=",
        version = "v1.2.0",
    )

    go_repository(
        name = "com_github_google_uuid",
@@ -1849,10 +1879,10 @@ def prysm_deps():
        version = "v0.0.0-20210917013441-d37c07cfda4e",
    )
    go_repository(
        name = "com_github_holiman_big",
        importpath = "github.com/holiman/big",
        sum = "h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw=",
        version = "v0.0.0-20221017200358-a027dc42d04e",
        name = "com_github_holiman_billy",
        importpath = "github.com/holiman/billy",
        sum = "h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw=",
        version = "v0.0.0-20230718173358-1c7e68d277a7",
    )

    go_repository(
@@ -1864,15 +1894,15 @@ def prysm_deps():
    go_repository(
        name = "com_github_holiman_goevmlab",
        importpath = "github.com/holiman/goevmlab",
        sum = "h1:WZmIv3jvH/3MJDtOaHyE3SCzYHchDXVP6Hlcyh/+dQw=",
        version = "v0.0.0-20221207202144-89074274e1b7",
        sum = "h1:I5Cp9Y1fugGwcNGVVc69Fmgho0fkmtDHl6cej51+PJM=",
        version = "v0.0.0-20230705203227-bf95bd5b9b75",
    )

    go_repository(
        name = "com_github_holiman_uint256",
        importpath = "github.com/holiman/uint256",
        sum = "h1:XRtyuda/zw2l+Bq/38n5XUoEF72aSOu/77Thd9pPp2o=",
        version = "v1.2.1",
        sum = "h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=",
        version = "v1.2.3",
    )
    go_repository(
        name = "com_github_hpcloud_tail",
@@ -1961,8 +1991,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_influxdata_influxdb1_client",
        importpath = "github.com/influxdata/influxdb1-client",
        sum = "h1:/WZQPMZNsjZ7IlCpsLGdQBINg5bxKQ1K1sh6awxLtkA=",
        version = "v0.0.0-20191209144304-8bf82d3c094d",
        sum = "h1:qSHzRbhzK8RdXOsAdfDgO49TtqC1oZ+acxPrkfTxcCs=",
        version = "v0.0.0-20220302092344-a9ab5670611c",
    )
    go_repository(
        name = "com_github_influxdata_influxdb_client_go_v2",
@@ -2245,8 +2275,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_karalabe_usb",
        importpath = "github.com/karalabe/usb",
        sum = "h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=",
        version = "v0.0.2",
        sum = "h1:AqsttAyEyIEsNz5WLRwuRwjiT5CMDUfLk6cFJDVPebs=",
        version = "v0.0.3-0.20230711191512-61db3e06439c",
    )
    go_repository(
        name = "com_github_kataras_blocks",
@@ -2290,6 +2320,12 @@ def prysm_deps():
        sum = "h1:sCAqWuJV7nPzGrlb0os3j49lk2JhILT0rID38NHNLpA=",
        version = "v0.0.4",
    )
    go_repository(
        name = "com_github_kilic_bls12_381",
        importpath = "github.com/kilic/bls12-381",
        sum = "h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4=",
        version = "v0.1.0",
    )

    go_repository(
        name = "com_github_kisielk_errcheck",
@@ -2458,10 +2494,8 @@ def prysm_deps():
        ],
        build_file_proto_mode = "disable_global",
        importpath = "github.com/libp2p/go-libp2p",
        patch_args = ["-p1"],
        patches = ["//third_party:com_github_libp2p_go_libp2p.patch"],
        sum = "h1:KwA7pXKXpz8hG6Cr1fMA7UkgleogcwQj0sxl5qquWRg=",
        version = "v0.27.5",
        sum = "h1:IX5x/4yKwyPQeVS2AXHZ3J4YATM9oHBGH1gBc23jBAI=",
        version = "v0.27.8",
    )
    go_repository(
        name = "com_github_libp2p_go_libp2p_asn_util",
@@ -2796,6 +2830,12 @@ def prysm_deps():
        sum = "h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=",
        version = "v0.4.0",
    )
    go_repository(
        name = "com_github_mmcloughlin_profile",
        importpath = "github.com/mmcloughlin/profile",
        sum = "h1:jhDmAqPyebOsVDOCICJoINoLb/AnLBaUw58nFzxWS2w=",
        version = "v0.1.1",
    )

    go_repository(
        name = "com_github_modern_go_concurrent",
@@ -3257,6 +3297,12 @@ def prysm_deps():
        sum = "h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=",
        version = "v1.1.1",
    )
    go_repository(
        name = "com_github_prashantv_gostub",
        importpath = "github.com/prashantv/gostub",
        sum = "h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=",
        version = "v1.1.0",
    )

    go_repository(
        name = "com_github_prometheus_client_golang",
@@ -3295,6 +3341,13 @@ def prysm_deps():
        sum = "h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic=",
        version = "v0.10.0",
    )
    go_repository(
        name = "com_github_protolambda_bls12_381_util",
        importpath = "github.com/protolambda/bls12-381-util",
        sum = "h1:cZC+usqsYgHtlBaGulVnZ1hfKAi8iWtujBnRLQE698c=",
        version = "v0.0.0-20220416220906-d8552aa452c7",
    )

    go_repository(
        name = "com_github_prysmaticlabs_fastssz",
        importpath = "github.com/prysmaticlabs/fastssz",
@@ -3364,8 +3417,8 @@ def prysm_deps():
            "gazelle:exclude generate_cert.go",
        ],
        importpath = "github.com/quic-go/qtls-go1-19",
        sum = "h1:tFxjCFcTQzK+oMxG6Zcvp4Dq8dx4yD3dDiIiyc86Z5U=",
        version = "v0.3.2",
        sum = "h1:wznEHvJwd+2X3PqftRha0SUKmGsnb6dfArMhy9PeJVE=",
        version = "v0.3.3",
    )

    go_repository(
@@ -3374,8 +3427,8 @@ def prysm_deps():
            "gazelle:exclude generate_cert.go",
        ],
        importpath = "github.com/quic-go/qtls-go1-20",
        sum = "h1:WLOPx6OY/hxtTxKV1Zrq20FtXtDEkeY00CGQm8GEa3E=",
        version = "v0.2.2",
        sum = "h1:m575dovXn1y2ATOb1XrRFcrv0F+EQmlowTkoraNkDPI=",
        version = "v0.2.3",
    )

    go_repository(
@@ -3424,8 +3477,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_rivo_tview",
        importpath = "github.com/rivo/tview",
        sum = "h1:0nVxhPi+jdqG11c3n4zTcZQbjGy0yi60ym/6B+NITPU=",
        version = "v0.0.0-20221117065207-09f052e6ca98",
        sum = "h1:vpjWdGBgikHYD4ruBvDINMxwdh5UWVck9yOyrwFktMo=",
        version = "v0.0.0-20230330183452-5796b0cd5c1f",
    )
    go_repository(
        name = "com_github_rivo_uniseg",
@@ -3434,8 +3487,8 @@ def prysm_deps():
            "gazelle:exclude gen_properties.go",
        ],
        importpath = "github.com/rivo/uniseg",
        sum = "h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=",
        version = "v0.4.3",
        sum = "h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=",
        version = "v0.4.4",
    )
    go_repository(
        name = "com_github_rjeczalik_notify",
@@ -3755,8 +3808,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_spf13_cobra",
        importpath = "github.com/spf13/cobra",
        sum = "h1:O63eWlXlvyw4YdsuatjRIU6emvJ2fqz+PTdMEoxIT2s=",
        version = "v1.0.1-0.20201006035406-b97b5ead31f7",
        sum = "h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_github_spf13_jwalterweatherman",
@@ -3823,15 +3876,6 @@ def prysm_deps():
        version = "v1.2.0",
    )

    http_archive(
        name = "com_github_supranational_blst",
        urls = [
            "https://github.com/supranational/blst/archive/61758ce4e1d18e6929658ac3a29fa39ad91cd294.tar.gz",
        ],
        strip_prefix = "blst-61758ce4e1d18e6929658ac3a29fa39ad91cd294",
        build_file = "//third_party:blst/blst.BUILD",
        sha256 = "acc022ddcf6181f8f402365d6382ea66c4352a10fe5490defd53b0db89739c42",
    )
    go_repository(
        name = "com_github_syndtr_goleveldb",
        importpath = "github.com/syndtr/goleveldb",
@@ -3969,8 +4013,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_urfave_cli_v2",
        importpath = "github.com/urfave/cli/v2",
        sum = "h1:YHDQ46s3VghFHFf1DdF+Sh7H4RqhcM+t0TmZRJx4oJY=",
        version = "v2.23.7",
        sum = "h1:zw8dSP7ghX0Gmm8vugrs6q9Ku0wzweqPyshy+syu9Gw=",
        version = "v2.25.1",
    )
    go_repository(
        name = "com_github_urfave_negroni",
@@ -5128,6 +5172,12 @@ def prysm_deps():
        sum = "h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=",
        version = "v2.0.0-20180705113604-9856a29383ce",
    )
    go_repository(
        name = "in_gopkg_natefinch_lumberjack_v2",
        importpath = "gopkg.in/natefinch/lumberjack.v2",
        sum = "h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=",
        version = "v2.0.0",
    )

    go_repository(
        name = "in_gopkg_natefinch_npipe_v2",
@@ -5375,14 +5425,14 @@ def prysm_deps():
    go_repository(
        name = "org_golang_x_crypto",
        importpath = "golang.org/x/crypto",
        sum = "h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=",
        version = "v0.7.0",
        sum = "h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=",
        version = "v0.9.0",
    )
    go_repository(
        name = "org_golang_x_exp",
        importpath = "golang.org/x/exp",
        sum = "h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=",
        version = "v0.0.0-20230321023759-10a507213a29",
        sum = "h1:g0bG7Z4uG+OgH2QDODnjp6ggkk1bJDsINcuWmJN1iJU=",
        version = "v0.0.0-20230810033253-352e893a4cad",
    )
    go_repository(
        name = "org_golang_x_image",
@@ -5407,15 +5457,15 @@ def prysm_deps():
    go_repository(
        name = "org_golang_x_mod",
        importpath = "golang.org/x/mod",
        sum = "h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=",
        version = "v0.10.0",
        sum = "h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=",
        version = "v0.11.0",
    )

    go_repository(
        name = "org_golang_x_net",
        importpath = "golang.org/x/net",
        sum = "h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=",
        version = "v0.9.0",
        sum = "h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=",
        version = "v0.10.0",
    )
    go_repository(
        name = "org_golang_x_oauth2",
@@ -5433,20 +5483,20 @@ def prysm_deps():
    go_repository(
        name = "org_golang_x_sync",
        importpath = "golang.org/x/sync",
        sum = "h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=",
        version = "v0.1.0",
        sum = "h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=",
        version = "v0.3.0",
    )
    go_repository(
        name = "org_golang_x_sys",
        importpath = "golang.org/x/sys",
        sum = "h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=",
        version = "v0.7.0",
        sum = "h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=",
        version = "v0.9.0",
    )
    go_repository(
        name = "org_golang_x_term",
        importpath = "golang.org/x/term",
        sum = "h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=",
        version = "v0.7.0",
        sum = "h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=",
        version = "v0.8.0",
    )

    go_repository(
@@ -5458,14 +5508,14 @@ def prysm_deps():
    go_repository(
        name = "org_golang_x_time",
        importpath = "golang.org/x/time",
        sum = "h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=",
        version = "v0.0.0-20220922220347-f3bd1da661af",
        sum = "h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=",
        version = "v0.3.0",
    )
    go_repository(
        name = "org_golang_x_tools",
        importpath = "golang.org/x/tools",
        sum = "h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=",
        version = "v0.8.0",
        sum = "h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=",
        version = "v0.9.1",
    )

    go_repository(
@@ -5508,8 +5558,8 @@ def prysm_deps():
            "gazelle:go_visibility @prysm//runtime/maxprocs:__pkg__",
        ],
        importpath = "go.uber.org/automaxprocs",
        sum = "h1:II28aZoGdaglS5vVNnspf28lnZpXScxtIozx1lAjdb0=",
        version = "v1.3.0",
        sum = "h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=",
        version = "v1.5.2",
    )
    go_repository(
        name = "org_uber_go_dig",
@@ -5548,3 +5598,12 @@ def prysm_deps():
        sum = "h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=",
        version = "v1.24.0",
    )
    http_archive(
        name = "com_github_supranational_blst",
        urls = [
            "https://github.com/supranational/blst/archive/3dd0f804b1819e5d03fb22ca2e6fac105932043a.tar.gz",
        ],
        strip_prefix = "blst-3dd0f804b1819e5d03fb22ca2e6fac105932043a",
        build_file = "//third_party:blst/blst.BUILD",
        sha256 = "132124c074e59ead77e1828cc54b587a182ea67b781b72198e802af4696d78fe",
    )

@@ -1,4 +1,4 @@
//go:build fuzz && go1.18
//go:build go1.18

package ssz_test

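With the `fuzz` tag dropped from this file's build constraint, its fuzz targets compile under the ordinary go1.18+ toolchain, which is what allows them to be listed and run with native `go test -fuzz` tooling. The snippet below is a hypothetical illustration of a target of that shape; FuzzExample is not a function from this package, and the body is a placeholder.

//go:build go1.18

package ssz_test

import "testing"

// Hypothetical example of a native Go fuzz target; any function named FuzzXxx
// with this signature can be run via `go test -run=^$ -fuzz=FuzzXxx`.
func FuzzExample(f *testing.F) {
	f.Add([]byte{0x00}) // seed corpus entry
	f.Fuzz(func(t *testing.T, data []byte) {
		if len(data) > 1<<20 {
			t.Skip("input too large")
		}
	})
}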
Some files were not shown because too many files have changed in this diff.