Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 22:07:59 -05:00)

Compare commits: testStuff1...v3.0.0-rc. (38 commits)
| SHA1 |
|---|
| 1c6fa65f7b |
| eaa2566e90 |
| 6957f0637f |
| 01b1f15bdf |
| b787fd877a |
| 2c89ce810d |
| e687fff922 |
| 5e2498be7e |
| 76f958710f |
| 1775cf89c6 |
| 8fecfaee48 |
| f089405d2f |
| 029c81a2e4 |
| 56c48b4971 |
| 20ed47a107 |
| e30471f1a0 |
| 3b38765a2d |
| b60e508c89 |
| a65c670f5e |
| 4af7d8230a |
| 27733969f7 |
| e70fe1c9fd |
| 9b3a834437 |
| d815fa8f21 |
| ac3079f8cd |
| cb8f6423e0 |
| 515e7c959f |
| 82bbfce524 |
| 95430ddb57 |
| 21b7861d37 |
| c1e7afa201 |
| dfa400d4a1 |
| b04c28b30c |
| ed07359573 |
| 25d87dd27b |
| a9ccabf6c9 |
| 2377d6d6ea |
| 100ca0ebaf |
.github/actions/gofmt/Dockerfile (vendored): 5 changes

@@ -1,5 +0,0 @@
FROM cytopia/gofmt

COPY entrypoint.sh /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]
.github/actions/gofmt/action.yml (vendored): 12 changes

@@ -1,12 +0,0 @@
name: 'Gofmt checker'
description: 'Checks that all project files have been properly formatted.'
inputs:
  path:
    description: 'Path to check'
    required: true
    default: './'
runs:
  using: 'docker'
  image: 'Dockerfile'
  args:
    - ${{ inputs.path }}
.github/actions/gofmt/entrypoint.sh (vendored): 15 changes

@@ -1,15 +0,0 @@
#!/bin/sh -l
set -e

cd $GITHUB_WORKSPACE

# Check if any files are not formatted.
nonformatted="$(gofmt -l $1 2>&1)"

# Return if `go fmt` passes.
[ -z "$nonformatted" ] && exit 0

# Notify of issues with formatting.
echo "Following files need to be properly formatted:"
echo "$nonformatted"
exit 1
.github/workflows/go.yml (vendored): 14 changes

@@ -18,18 +18,6 @@ jobs:
        id: gomodtidy
        uses: ./.github/actions/gomodtidy

      - name: Gofmt checker
        id: gofmt
        uses: ./.github/actions/gofmt
        with:
          path: ./

      - name: GoImports checker
        id: goimports
        uses: Jerome1337/goimports-action@v1.0.2
        with:
          goimports-path: ./

  gosec:
    name: Gosec scan
    runs-on: ubuntu-latest

@@ -45,7 +33,7 @@ jobs:
      - name: Run Gosec Security Scanner
        run: | # https://github.com/securego/gosec/issues/469
          export PATH=$PATH:$(go env GOPATH)/bin
          go install github.com/securego/gosec/v2/cmd/gosec@latest
          go install github.com/securego/gosec/v2/cmd/gosec@v2.12.0
          gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...

  lint:
@@ -11,6 +11,8 @@ run:
linters:
  disable-all: true
  enable:
    - gofmt
    - goimports
    - deadcode
    - errcheck
    - gosimple
WORKSPACE: 20 changes

@@ -215,7 +215,7 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)

consensus_spec_version = "v1.2.0-rc.1"
consensus_spec_version = "v1.2.0-rc.3"

bls_test_version = "v0.1.1"

@@ -231,7 +231,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "9c93f87378aaa6d6fe1c67b396eac2aacc9594af2a83f028cb99c95dea5b81df",
sha256 = "18ca21497f41042cdbe60e2333b100d218b2994fb514964b9deb23daf615a12f",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -247,7 +247,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "52f2c52415228cee8a4de5a09abff785f439a77dfef8f03e834e4e16857673c1",
sha256 = "47b8f6fabe39b4a69f13054ba74e26ab51581ddbd359c18cf0f03317474e299c",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -263,7 +263,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "022dcc0d6de7dd27b337a0d1b945077eaf5ee47000700395a693fc25e12f96df",
sha256 = "a061efc05429b169393c32dc2633a948269461b0fe681f11d41e170a880dcc71",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -278,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "0a9c110305cbd6ebbe0d942f0f33e6ce22dd484ce4ceed277bf185a091941cde",
sha256 = "753d51c6a6cc6df101c897e4bea77f73b271f50aeda74440f412514d4bd88a86",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

@@ -309,9 +309,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "126b615e3853e29b61f082f6c89c8bc1c38cd92fb84b0004396fc49e7acc8d9f",
strip_prefix = "eth2-networks-f3ccbe0cf5798d5cd23e4e6e7119aefa043c0935",
url = "https://github.com/eth-clients/eth2-networks/archive/f3ccbe0cf5798d5cd23e4e6e7119aefa043c0935.tar.gz",
sha256 = "82b01a48b143fe0f2fb7fb5f5dd385c1f934335a12d7954f08b1d45d77427b5e",
strip_prefix = "eth2-networks-674f7a1d01d9c18345456eab76e3871b3df2126b",
url = "https://github.com/eth-clients/eth2-networks/archive/674f7a1d01d9c18345456eab76e3871b3df2126b.tar.gz",
)

http_archive(

@@ -342,9 +342,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "98013b40922e54a64996da49b939e0a88fe2456f68eedc5aee4ceba0f8623f71",
sha256 = "e0c0b5dc609b3a221e74c720f483c595441f2ad5e38bb8aa3522636039945a6f",
urls = [
    "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.0/prysm-web-ui.tar.gz",
    "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.1/prysm-web-ui.tar.gz",
],
)
@@ -25,10 +25,8 @@ import (
type ChainInfoFetcher interface {
    HeadFetcher
    FinalizationFetcher
    GenesisFetcher
    CanonicalFetcher
    ForkFetcher
    TimeFetcher
    HeadDomainFetcher
}

@@ -70,6 +68,8 @@ type HeadFetcher interface {
type ForkFetcher interface {
    ForkChoicer() forkchoice.ForkChoicer
    CurrentFork() *ethpb.Fork
    GenesisFetcher
    TimeFetcher
}

// CanonicalFetcher retrieves the current chain's canonical information.
@@ -87,7 +87,6 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
        return nil
    }

    // A chain re-org occurred, so we fire an event notifying the rest of the services.
    s.headLock.RLock()
    oldHeadBlock, err := s.headBlock()
    if err != nil {

@@ -98,11 +97,21 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
    headSlot := s.HeadSlot()
    newHeadSlot := headBlock.Block().Slot()
    newStateRoot := headBlock.Block().StateRoot()

    // A chain re-org occurred, so we fire an event notifying the rest of the services.
    if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != oldHeadRoot {
        commonRoot, err := s.ForkChoicer().CommonAncestorRoot(ctx, oldHeadRoot, newHeadRoot)
        if err != nil {
            log.WithError(err).Error("Could not find common ancestor root")
            commonRoot = params.BeaconConfig().ZeroHash
        }
        log.WithFields(logrus.Fields{
            "newSlot": fmt.Sprintf("%d", newHeadSlot),
            "oldSlot": fmt.Sprintf("%d", headSlot),
        }).Debug("Chain reorg occurred")
            "newSlot": fmt.Sprintf("%d", newHeadSlot),
            "newRoot": fmt.Sprintf("%#x", newHeadRoot),
            "oldSlot": fmt.Sprintf("%d", headSlot),
            "oldRoot": fmt.Sprintf("%#x", oldHeadRoot),
            "commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
        }).Info("Chain reorg occurred")
        absoluteSlotDifference := slots.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
        isOptimistic, err := s.IsOptimistic(ctx)
        if err != nil {
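The hunk above swaps the two-field Debug entry for a five-field Info entry that also records the old and new head roots and their common ancestor. A minimal standalone sketch of the new log call, using logrus directly with placeholder values rather than the service's real head-tracking state, would look like this:

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

func main() {
	// Placeholder values standing in for the service's head-tracking state.
	var oldHeadRoot, newHeadRoot, commonRoot [32]byte
	oldSlot, newSlot := uint64(96), uint64(97)

	// Mirrors the fields added by this change: roots are hex-encoded,
	// slots are decimal, and the entry is now logged at Info level.
	logrus.WithFields(logrus.Fields{
		"newSlot":            fmt.Sprintf("%d", newSlot),
		"newRoot":            fmt.Sprintf("%#x", newHeadRoot),
		"oldSlot":            fmt.Sprintf("%d", oldSlot),
		"oldRoot":            fmt.Sprintf("%#x", oldHeadRoot),
		"commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
	}).Info("Chain reorg occurred")
}
```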
@@ -139,6 +139,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
    if err := s.insertBlockToForkchoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
        return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
    }
    if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
        return errors.Wrap(err, "could not handle block's attestations")
    }
    s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
    if isValidPayload {
        if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {

@@ -516,6 +519,29 @@ func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfac
    return nil
}

// This feeds in the attestations included in the block to fork choice store. It's allows fork choice store
// to gain information on the most current chain.
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.BeaconBlock, st state.BeaconState) error {
    // Feed in block's attestations to fork choice store.
    for _, a := range blk.Body().Attestations() {
        committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
        if err != nil {
            return err
        }
        indices, err := attestation.AttestingIndices(a.AggregationBits, committee)
        if err != nil {
            return err
        }
        r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
        if s.cfg.ForkChoiceStore.HasNode(r) {
            s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.Data.Target.Epoch)
        } else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
            return err
        }
    }
    return nil
}

// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
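The new handleBlockAttestations helper routes each attestation carried in a block: when fork choice already knows the attested beacon block root, the vote is applied immediately via ProcessAttestation; otherwise it is saved to the attestation pool for later processing. A self-contained sketch of that routing decision, using simplified stand-in types rather than Prysm's real interfaces, is shown below:

```go
package main

import "fmt"

// Simplified stand-ins for the fork choice store and the attestation pool.
type forkChoice struct{ nodes map[[32]byte]bool }
type attPool struct{ buffered int }

func (f *forkChoice) HasNode(r [32]byte) bool             { return f.nodes[r] }
func (f *forkChoice) ProcessAttestation(r [32]byte)       { fmt.Printf("applied vote for %#x\n", r[:4]) }
func (p *attPool) SaveBlockAttestation(r [32]byte) error  { p.buffered++; return nil }

// routeAttestation mirrors the per-attestation decision in handleBlockAttestations:
// apply the vote when the attested block is already in fork choice, otherwise keep
// it in the pool so it can be replayed once the block arrives.
func routeAttestation(fc *forkChoice, pool *attPool, beaconBlockRoot [32]byte) error {
	if fc.HasNode(beaconBlockRoot) {
		fc.ProcessAttestation(beaconBlockRoot)
		return nil
	}
	return pool.SaveBlockAttestation(beaconBlockRoot)
}

func main() {
	known := [32]byte{0x01}
	unknown := [32]byte{0x02}
	fc := &forkChoice{nodes: map[[32]byte]bool{known: true}}
	pool := &attPool{}
	_ = routeAttestation(fc, pool, known)   // applied directly to fork choice
	_ = routeAttestation(fc, pool, unknown) // buffered in the pool
	fmt.Println("buffered attestations:", pool.buffered)
}
```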
@@ -3314,6 +3314,75 @@ func TestStore_NoViableHead_Reboot_Protoarray(t *testing.T) {
    require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())
}

func TestOnBlock_HandleBlockAttestations(t *testing.T) {
    ctx := context.Background()
    beaconDB := testDB.SetupDB(t)
    opts := []Option{
        WithDatabase(beaconDB),
        WithAttestationPool(attestations.NewPool()),
        WithStateGen(stategen.New(beaconDB)),
        WithForkChoiceStore(doublylinkedtree.New()),
        WithStateNotifier(&mock.MockStateNotifier{}),
    }
    service, err := NewService(ctx, opts...)
    require.NoError(t, err)

    st, keys := util.DeterministicGenesisState(t, 64)
    stateRoot, err := st.HashTreeRoot(ctx)
    require.NoError(t, err, "Could not hash genesis state")

    require.NoError(t, service.saveGenesisData(ctx, st))

    genesis := blocks.NewGenesisBlock(stateRoot[:])
    wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
    require.NoError(t, err)
    require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
    parentRoot, err := genesis.Block.HashTreeRoot()
    require.NoError(t, err, "Could not get signing root")
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
    require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

    st, err = service.HeadState(ctx)
    require.NoError(t, err)
    b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
    require.NoError(t, err)
    wsb, err = consensusblocks.NewSignedBeaconBlock(b)
    require.NoError(t, err)
    root, err := b.Block.HashTreeRoot()
    require.NoError(t, err)
    require.NoError(t, service.onBlock(ctx, wsb, root))

    st, err = service.HeadState(ctx)
    require.NoError(t, err)
    b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
    require.NoError(t, err)
    wsb, err = consensusblocks.NewSignedBeaconBlock(b)
    require.NoError(t, err)

    // prepare another block that is not inserted
    st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
    require.NoError(t, err)
    b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
    require.NoError(t, err)
    wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
    require.NoError(t, err)

    require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
    a := wsb.Block().Body().Attestations()[0]
    r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
    require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))

    require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
    a3 := wsb3.Block().Body().Attestations()[0]
    r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
    require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))

    require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committe as st
    require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
    require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committe as st
    require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
}

// Helper function to simulate the block being on time or delayed for proposer
// boost. It alters the genesisTime tracked by the store.
func driftGenesisTime(s *Service, slot int64, delay int64) {
@@ -150,11 +150,6 @@ func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
        return err
    }

    // Add block attestations to the fork choice pool to compute head.
    if err := s.cfg.AttPool.SaveBlockAttestations(b.Body().Attestations()); err != nil {
        log.WithError(err).Error("Could not save block attestations for fork choice")
        return nil
    }
    // Mark block exits as seen so we don't include same ones in future blocks.
    for _, e := range b.Body().VoluntaryExits() {
        s.cfg.ExitPool.MarkIncluded(e)
@@ -76,9 +76,9 @@ func TestService_ReceiveBlock(t *testing.T) {
            ),
        },
        check: func(t *testing.T, s *Service) {
            if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 2 {
            if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 0 {
                t.Errorf("Did not get the correct number of block attestations saved to the pool. "+
                    "Got %d but wanted %d", baCount, 2)
                    "Got %d but wanted %d", baCount, 0)
            }
        },
    },
@@ -9,8 +9,8 @@ import (
)

// NewDB initializes a new DB.
func NewDB(ctx context.Context, dirPath string, config *kv.Config) (Database, error) {
    return kv.NewKVStore(ctx, dirPath, config)
func NewDB(ctx context.Context, dirPath string) (Database, error) {
    return kv.NewKVStore(ctx, dirPath)
}

// NewDBFilename uses the KVStoreDatafilePath so that if this layer of
@@ -13,7 +13,7 @@ import (
)

func TestStore_Backup(t *testing.T) {
    db, err := NewKVStore(context.Background(), t.TempDir(), &Config{})
    db, err := NewKVStore(context.Background(), t.TempDir())
    require.NoError(t, err, "Failed to instantiate DB")
    ctx := context.Background()

@@ -44,7 +44,7 @@ func TestStore_Backup(t *testing.T) {
    // our NewKVStore function expects when opening a database.
    require.NoError(t, os.Rename(oldFilePath, newFilePath))

    backedDB, err := NewKVStore(ctx, backupsPath, &Config{})
    backedDB, err := NewKVStore(ctx, backupsPath)
    require.NoError(t, err, "Failed to instantiate DB")
    t.Cleanup(func() {
        require.NoError(t, backedDB.Close(), "Failed to close database")

@@ -53,7 +53,7 @@ func TestStore_Backup(t *testing.T) {
}

func TestStore_BackupMultipleBuckets(t *testing.T) {
    db, err := NewKVStore(context.Background(), t.TempDir(), &Config{})
    db, err := NewKVStore(context.Background(), t.TempDir())
    require.NoError(t, err, "Failed to instantiate DB")
    ctx := context.Background()

@@ -88,7 +88,7 @@ func TestStore_BackupMultipleBuckets(t *testing.T) {
    // our NewKVStore function expects when opening a database.
    require.NoError(t, os.Rename(oldFilePath, newFilePath))

    backedDB, err := NewKVStore(ctx, backupsPath, &Config{})
    backedDB, err := NewKVStore(ctx, backupsPath)
    require.NoError(t, err, "Failed to instantiate DB")
    t.Cleanup(func() {
        require.NoError(t, backedDB.Close(), "Failed to close database")
@@ -37,6 +37,8 @@ const (
    boltAllocSize = 8 * 1024 * 1024
    // The size of hash length in bytes
    hashLength = 32
    // Specifies the initial mmap size of bolt.
    mmapSize = 536870912
)

var (

@@ -70,11 +72,6 @@ var blockedBuckets = [][]byte{
    finalizedBlockRootsIndexBucket,
}

// Config for the bolt db kv store.
type Config struct {
    InitialMMapSize int
}

// Store defines an implementation of the Prysm Database interface
// using BoltDB as the underlying persistent kv-store for Ethereum Beacon Nodes.
type Store struct {

@@ -96,7 +93,7 @@ func KVStoreDatafilePath(dirPath string) string {
// NewKVStore initializes a new boltDB key-value store at the directory
// path specified, creates the kv-buckets based on the schema, and stores
// an open connection db object as a property of the Store struct.
func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, error) {
func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
    hasDir, err := file.HasDir(dirPath)
    if err != nil {
        return nil, err

@@ -113,7 +110,7 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
        params.BeaconIoConfig().ReadWritePermissions,
        &bolt.Options{
            Timeout: 1 * time.Second,
            InitialMmapSize: config.InitialMMapSize,
            InitialMmapSize: mmapSize,
        },
    )
    if err != nil {
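With this change NewKVStore no longer takes a Config, and the bolt initial mmap size comes from the package-level mmapSize constant rather than a per-call option. A hedged migration sketch for a caller follows; the import path is assumed from the repository layout and the data directory is a placeholder:

```go
package main

import (
	"context"
	"log"

	"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/kv"
)

func main() {
	ctx := context.Background()

	// Old call shape (removed by this change):
	//   store, err := kv.NewKVStore(ctx, "/var/lib/prysm/beacon", &kv.Config{InitialMMapSize: 536870912})
	//
	// New call shape: the initial mmap size is no longer configurable per call.
	store, err := kv.NewKVStore(ctx, "/var/lib/prysm/beacon")
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		if err := store.Close(); err != nil {
			log.Fatal(err)
		}
	}()
}
```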
@@ -11,7 +11,7 @@ import (
// setupDB instantiates and returns a Store instance.
func setupDB(t testing.TB) *Store {
    db, err := NewKVStore(context.Background(), t.TempDir(), &Config{})
    db, err := NewKVStore(context.Background(), t.TempDir())
    require.NoError(t, err, "Failed to instantiate DB")
    t.Cleanup(func() {
        require.NoError(t, db.Close(), "Failed to close database")
@@ -22,7 +22,7 @@ func TestRestore(t *testing.T) {
    logHook := logTest.NewGlobal()
    ctx := context.Background()

    backupDb, err := kv.NewKVStore(context.Background(), t.TempDir(), &kv.Config{})
    backupDb, err := kv.NewKVStore(context.Background(), t.TempDir())
    require.NoError(t, err)
    head := util.NewBeaconBlock()
    head.Block.Slot = 5000

@@ -58,7 +58,7 @@ func TestRestore(t *testing.T) {
    require.NoError(t, err)
    assert.Equal(t, 1, len(files))
    assert.Equal(t, kv.DatabaseFileName, files[0].Name())
    restoredDb, err := kv.NewKVStore(context.Background(), path.Join(restoreDir, kv.BeaconNodeDbDirName), &kv.Config{})
    restoredDb, err := kv.NewKVStore(context.Background(), path.Join(restoreDir, kv.BeaconNodeDbDirName))
    defer func() {
        require.NoError(t, restoredDb.Close())
    }()
@@ -21,13 +21,10 @@ const (
    // DatabaseFileName is the name of the beacon node database.
    DatabaseFileName = "slasher.db"
    boltAllocSize = 8 * 1024 * 1024
    // Specifies the initial mmap size of bolt.
    mmapSize = 536870912
)

// Config for the bolt db kv store.
type Config struct {
    InitialMMapSize int
}

// Store defines an implementation of the Prysm Database interface
// using BoltDB as the underlying persistent kv-store for Ethereum consensus.
type Store struct {

@@ -39,7 +36,7 @@ type Store struct {
// NewKVStore initializes a new boltDB key-value store at the directory
// path specified, creates the kv-buckets based on the schema, and stores
// an open connection db object as a property of the Store struct.
func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, error) {
func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
    hasDir, err := file.HasDir(dirPath)
    if err != nil {
        return nil, err

@@ -55,7 +52,7 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
        params.BeaconIoConfig().ReadWritePermissions,
        &bolt.Options{
            Timeout: 1 * time.Second,
            InitialMmapSize: config.InitialMMapSize,
            InitialMmapSize: mmapSize,
        },
    )
    if err != nil {
@@ -9,7 +9,7 @@ import (
// setupDB instantiates and returns a Store instance.
func setupDB(t testing.TB) *Store {
    db, err := NewKVStore(context.Background(), t.TempDir(), &Config{})
    db, err := NewKVStore(context.Background(), t.TempDir())
    require.NoError(t, err, "Failed to instantiate DB")
    t.Cleanup(func() {
        require.NoError(t, db.Close(), "Failed to close database")
@@ -14,7 +14,7 @@ import (

// SetupDB instantiates and returns database backed by key value store.
func SetupDB(t testing.TB) db.Database {
    s, err := kv.NewKVStore(context.Background(), t.TempDir(), &kv.Config{})
    s, err := kv.NewKVStore(context.Background(), t.TempDir())
    if err != nil {
        t.Fatal(err)
    }

@@ -28,7 +28,7 @@ func SetupDB(t testing.TB) db.Database {

// SetupSlasherDB --
func SetupSlasherDB(t testing.TB) iface.SlasherDatabase {
    s, err := slasherkv.NewKVStore(context.Background(), t.TempDir(), &slasherkv.Config{})
    s, err := slasherkv.NewKVStore(context.Background(), t.TempDir())
    if err != nil {
        t.Fatal(err)
    }
@@ -65,7 +65,6 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
            currClient := s.rpcClient
            if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
                errorLogger(err, "Could not connect to execution client endpoint")
                s.retryExecutionClientConnection(ctx, err)
                continue
            }
            // Close previous client, if connection was successful.
@@ -10,6 +10,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice",
visibility = [
    "//beacon-chain:__subpackages__",
    "//cmd:__subpackages__",
    "//testing/spectest:__subpackages__",
],
deps = [
@@ -183,11 +183,15 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
        return err
    }
    jcRoot := bytesutil.ToBytes32(jc.Root)
    // Releasing here the checkpoints lock because
    // AncestorRoot acquires a lock on nodes and that can
    // cause a double lock.
    f.store.checkpointsLock.Unlock()
    root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
    if err != nil {
        f.store.checkpointsLock.Unlock()
        return err
    }
    f.store.checkpointsLock.Lock()
    if root == currentRoot {
        f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
        f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
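Several fork choice hunks in this changeset apply the same locking discipline: copy what is needed under checkpointsLock, release it before calling into code that takes nodesLock (such as AncestorRoot), and re-acquire it only for the final write, so the two locks are never nested. A stripped-down sketch of that pattern with generic field names, not the real store, is:

```go
package main

import "sync"

type store struct {
	checkpointsLock sync.RWMutex
	nodesLock       sync.RWMutex
	justifiedEpoch  uint64
}

// ancestorEpoch stands in for a call like AncestorRoot that needs nodesLock.
func (s *store) ancestorEpoch() uint64 {
	s.nodesLock.RLock()
	defer s.nodesLock.RUnlock()
	return 0
}

func (s *store) updateCheckpoints(newEpoch uint64) {
	s.checkpointsLock.Lock()
	current := s.justifiedEpoch
	// Release before touching nodesLock so the two locks are never held together.
	s.checkpointsLock.Unlock()

	ancestor := s.ancestorEpoch()
	_ = ancestor

	// Re-acquire only for the final write.
	s.checkpointsLock.Lock()
	if newEpoch > current {
		s.justifiedEpoch = newEpoch
	}
	s.checkpointsLock.Unlock()
}

func main() {
	s := &store{}
	s.updateCheckpoints(1)
}
```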
@@ -296,7 +300,8 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
}

// updateBalances updates the balances that directly voted for each block taking into account the
// validators' latest votes. This function requires a lock in Store.nodesLock.
// validators' latest votes. This function requires a lock in Store.nodesLock
// and votesLock
func (f *ForkChoice) updateBalances(newBalances []uint64) error {
    for index, vote := range f.votes {
        // Skip if validator has been slashed

@@ -424,6 +429,9 @@ func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoo
// store-tracked list. Votes from these validators are not accounted for
// in forkchoice.
func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index types.ValidatorIndex) {
    f.votesLock.RLock()
    defer f.votesLock.RUnlock()

    f.store.nodesLock.Lock()
    defer f.store.nodesLock.Unlock()
    // return early if the index was already included:

@@ -433,8 +441,6 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index types.Validator
    f.store.slashedIndices[index] = true

    // Subtract last vote from this equivocating validator
    f.votesLock.RLock()
    defer f.votesLock.RUnlock()

    if index >= types.ValidatorIndex(len(f.balances)) {
        return
@@ -40,7 +40,8 @@ func (n *Node) applyWeightChanges(ctx context.Context) error {
    return nil
}

// updateBestDescendant updates the best descendant of this node and its children.
// updateBestDescendant updates the best descendant of this node and its
// children. This function assumes the caller has a lock on Store.nodesLock
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch) error {
    if ctx.Err() != nil {
        return ctx.Err()
@@ -41,15 +41,14 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
    }

    // Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
    f.store.checkpointsLock.Lock()

    f.store.checkpointsLock.RLock()
    bjcp := f.store.bestJustifiedCheckpoint
    jcp := f.store.justifiedCheckpoint
    fcp := f.store.finalizedCheckpoint
    f.store.checkpointsLock.RUnlock()
    if bjcp.Epoch > jcp.Epoch {
        finalizedSlot, err := slots.EpochStart(fcp.Epoch)
        if err != nil {
            f.store.checkpointsLock.Unlock()
            return err
        }

@@ -59,15 +58,15 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
        // loop call here.
        r, err := f.AncestorRoot(ctx, bjcp.Root, finalizedSlot)
        if err != nil {
            f.store.checkpointsLock.Unlock()
            return err
        }
        if r == fcp.Root {
            f.store.checkpointsLock.Lock()
            f.store.prevJustifiedCheckpoint = jcp
            f.store.justifiedCheckpoint = bjcp
            f.store.checkpointsLock.Unlock()
        }
    }
    f.store.checkpointsLock.Unlock()
    if !features.Get().DisablePullTips {
        f.updateUnrealizedCheckpoints()
    }
@@ -170,8 +170,11 @@ func (s *Store) insert(ctx context.Context,
    }

    // Update best descendants
    if err := s.treeRootNode.updateBestDescendant(ctx,
        s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch); err != nil {
    s.checkpointsLock.RLock()
    jEpoch := s.justifiedCheckpoint.Epoch
    fEpoch := s.finalizedCheckpoint.Epoch
    s.checkpointsLock.RUnlock()
    if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch); err != nil {
        return n, err
    }
}
@@ -10,7 +10,9 @@ import (
)

// This computes validator balance delta from validator votes.
// It returns a list of deltas that represents the difference between old balances and new balances.
// It returns a list of deltas that represents the difference between old
// balances and new balances. This function assumes the caller holds a lock in
// Store.nodesLock and Store.votesLock
func computeDeltas(
    ctx context.Context,
    count int,
@@ -41,15 +41,14 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
    }

    // Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
    f.store.checkpointsLock.Lock()

    f.store.checkpointsLock.RLock()
    bjcp := f.store.bestJustifiedCheckpoint
    jcp := f.store.justifiedCheckpoint
    fcp := f.store.finalizedCheckpoint
    f.store.checkpointsLock.RUnlock()
    if bjcp.Epoch > jcp.Epoch {
        finalizedSlot, err := slots.EpochStart(fcp.Epoch)
        if err != nil {
            f.store.checkpointsLock.Unlock()
            return err
        }

@@ -59,15 +58,15 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
        // loop call here.
        r, err := f.AncestorRoot(ctx, bjcp.Root, finalizedSlot)
        if err != nil {
            f.store.checkpointsLock.Unlock()
            return err
        }
        if r == fcp.Root {
            f.store.checkpointsLock.Lock()
            f.store.prevJustifiedCheckpoint = jcp
            f.store.justifiedCheckpoint = bjcp
            f.store.checkpointsLock.Unlock()
        }
    }
    f.store.checkpointsLock.Unlock()
    if !features.Get().DisablePullTips {
        f.updateUnrealizedCheckpoints()
    }
@@ -188,11 +188,15 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
        return err
    }
    jcRoot := bytesutil.ToBytes32(jc.Root)
    // release the checkpoints lock here because
    // AncestorRoot takes a lock on nodes and that can lead
    // to double locks
    f.store.checkpointsLock.Unlock()
    root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
    if err != nil {
        f.store.checkpointsLock.Unlock()
        return err
    }
    f.store.checkpointsLock.Lock()
    if root == currentRoot {
        f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
        f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
@@ -285,6 +289,8 @@ func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32
    if r1 == r2 {
        return r1, nil
    }
    f.store.nodesLock.RLock()
    defer f.store.nodesLock.RUnlock()

    i1, ok := f.store.nodesIndices[r1]
    if !ok || i1 >= uint64(len(f.store.nodes)) {
@@ -406,8 +412,12 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {

    if !s.viableForHead(bestNode) {
        s.allTipsAreInvalid = true
        s.checkpointsLock.RLock()
        jEpoch := s.justifiedCheckpoint.Epoch
        fEpoch := s.finalizedCheckpoint.Epoch
        s.checkpointsLock.RUnlock()
        return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch %d != %d, justifiedEpoch %d != %d",
            bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, s.finalizedCheckpoint.Epoch, bestNode.justifiedEpoch, s.justifiedCheckpoint.Epoch)
            bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, fEpoch, bestNode.justifiedEpoch, jEpoch)
    }
    s.allTipsAreInvalid = false
@@ -426,7 +436,8 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
    return bestNode.root, nil
}

// updateCanonicalNodes updates the canonical nodes mapping given the input block root.
// updateCanonicalNodes updates the canonical nodes mapping given the input
// block root. This function assumes the caller holds a lock in Store.nodesLock
func (s *Store) updateCanonicalNodes(ctx context.Context, root [32]byte) error {
    ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.updateCanonicalNodes")
    defer span.End()
@@ -548,14 +559,14 @@ func (s *Store) insert(ctx context.Context,
    if slot > s.highestReceivedSlot {
        s.highestReceivedSlot = slot
    }

    return n, nil
}

// applyWeightChanges iterates backwards through the nodes in store. It checks all nodes parent
// and its best child. For each node, it updates the weight with input delta and
// back propagate the nodes' delta to its parents' delta. After scoring changes,
// the best child is then updated along with the best descendant.
// the best child is then updated along with the best descendant. This function
// assumes the caller holds a lock in Store.nodesLock
func (s *Store) applyWeightChanges(
    ctx context.Context, newBalances []uint64, delta []int,
) error {
@@ -900,6 +911,8 @@ func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {
// store-tracked list. Votes from these validators are not accounted for
// in forkchoice.
func (f *ForkChoice) InsertSlashedIndex(ctx context.Context, index types.ValidatorIndex) {
    f.votesLock.RLock()
    defer f.votesLock.RUnlock()
    f.store.nodesLock.Lock()
    defer f.store.nodesLock.Unlock()
    // return early if the index was already included:

@@ -909,9 +922,6 @@ func (f *ForkChoice) InsertSlashedIndex(ctx context.Context, index types.Validat
    f.store.slashedIndices[index] = true

    // Subtract last vote from this equivocating validator
    f.votesLock.RLock()
    defer f.votesLock.RUnlock()

    if index >= types.ValidatorIndex(len(f.balances)) {
        return
    }
@@ -371,9 +371,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {

    log.WithField("database-path", dbPath).Info("Checking DB")

    d, err := db.NewDB(b.ctx, dbPath, &kv.Config{
        InitialMMapSize: cliCtx.Int(cmd.BoltMMapInitialSizeFlag.Name),
    })
    d, err := db.NewDB(b.ctx, dbPath)
    if err != nil {
        return err
    }

@@ -395,9 +393,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
    if err := d.ClearDB(); err != nil {
        return errors.Wrap(err, "could not clear database")
    }
    d, err = db.NewDB(b.ctx, dbPath, &kv.Config{
        InitialMMapSize: cliCtx.Int(cmd.BoltMMapInitialSizeFlag.Name),
    })
    d, err = db.NewDB(b.ctx, dbPath)
    if err != nil {
        return errors.Wrap(err, "could not create new database")
    }

@@ -467,9 +463,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {

    log.WithField("database-path", dbPath).Info("Checking DB")

    d, err := slasherkv.NewKVStore(b.ctx, dbPath, &slasherkv.Config{
        InitialMMapSize: cliCtx.Int(cmd.BoltMMapInitialSizeFlag.Name),
    })
    d, err := slasherkv.NewKVStore(b.ctx, dbPath)
    if err != nil {
        return err
    }

@@ -491,9 +485,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
    if err := d.ClearDB(); err != nil {
        return errors.Wrap(err, "could not clear database")
    }
    d, err = slasherkv.NewKVStore(b.ctx, dbPath, &slasherkv.Config{
        InitialMMapSize: cliCtx.Int(cmd.BoltMMapInitialSizeFlag.Name),
    })
    d, err = slasherkv.NewKVStore(b.ctx, dbPath)
    if err != nil {
        return errors.Wrap(err, "could not create new database")
    }

@@ -559,7 +551,6 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
        AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
        DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
        EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
        DisableDiscv5: cliCtx.Bool(flags.DisableDiscv5.Name),
        StateNotifier: b,
        DB: b.db,
    })
@@ -35,6 +35,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p",
visibility = [
    "//beacon-chain:__subpackages__",
    "//cmd:__subpackages__",
    "//testing/endtoend/evaluators:__pkg__",
    "//tools:__subpackages__",
],
@@ -10,7 +10,6 @@ import (
type Config struct {
    NoDiscovery bool
    EnableUPnP bool
    DisableDiscv5 bool
    StaticPeers []string
    BootstrapNodeAddr []string
    Discv5BootStrapAddr []string
@@ -332,6 +332,31 @@ func (s *Service) isPeerAtLimit(inbound bool) bool {
    return activePeers >= maxPeers || numOfConns >= maxPeers
}

// PeersFromStringAddrs convers peer raw ENRs into multiaddrs for p2p.
func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
    var allAddrs []ma.Multiaddr
    enodeString, multiAddrString := parseGenericAddrs(addrs)
    for _, stringAddr := range multiAddrString {
        addr, err := multiAddrFromString(stringAddr)
        if err != nil {
            return nil, errors.Wrapf(err, "Could not get multiaddr from string")
        }
        allAddrs = append(allAddrs, addr)
    }
    for _, stringAddr := range enodeString {
        enodeAddr, err := enode.Parse(enode.ValidSchemes, stringAddr)
        if err != nil {
            return nil, errors.Wrapf(err, "Could not get enode from string")
        }
        addr, err := convertToSingleMultiAddr(enodeAddr)
        if err != nil {
            return nil, errors.Wrapf(err, "Could not get multiaddr")
        }
        allAddrs = append(allAddrs, addr)
    }
    return allAddrs, nil
}

func parseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
    discv5Nodes, _ = parseGenericAddrs(addrs)
    if len(discv5Nodes) == 0 {

@@ -435,30 +460,6 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
    return addresses, nil
}

func peersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
    var allAddrs []ma.Multiaddr
    enodeString, multiAddrString := parseGenericAddrs(addrs)
    for _, stringAddr := range multiAddrString {
        addr, err := multiAddrFromString(stringAddr)
        if err != nil {
            return nil, errors.Wrapf(err, "Could not get multiaddr from string")
        }
        allAddrs = append(allAddrs, addr)
    }
    for _, stringAddr := range enodeString {
        enodeAddr, err := enode.Parse(enode.ValidSchemes, stringAddr)
        if err != nil {
            return nil, errors.Wrapf(err, "Could not get enode from string")
        }
        addr, err := convertToSingleMultiAddr(enodeAddr)
        if err != nil {
            return nil, errors.Wrapf(err, "Could not get multiaddr")
        }
        allAddrs = append(allAddrs, addr)
    }
    return allAddrs, nil
}

func multiAddrFromString(address string) (ma.Multiaddr, error) {
    return ma.NewMultiaddr(address)
}
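PeersFromStringAddrs is the exported replacement for the package-private peersFromStringAddrs removed in the second hunk, so packages outside p2p (for example under //cmd, which the BUILD visibility changes open up) can convert ENR/enode strings and raw multiaddrs into libp2p multiaddrs. A usage sketch with a placeholder address, not a real peer, might look like:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
)

func main() {
	// A raw multiaddr string; enode:// and enr: strings are also accepted.
	addrs := []string{"/ip4/10.0.0.5/tcp/13000"}

	multiaddrs, err := p2p.PeersFromStringAddrs(addrs)
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range multiaddrs {
		fmt.Println(a.String())
	}
}
```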
@@ -11,6 +11,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/encoder",
visibility = [
    "//beacon-chain:__subpackages__",
    "//cmd:__subpackages__",
],
deps = [
    "//config/params:go_default_library",
@@ -21,11 +21,10 @@ import (
type P2P interface {
    Broadcaster
    SetStreamHandler
    EncodingProvider
    PubSubProvider
    PubSubTopicUser
    SenderEncoder
    PeerManager
    Sender
    ConnectionHandler
    PeersProvider
    MetadataProvider

@@ -59,6 +58,12 @@ type ConnectionHandler interface {
    connmgr.ConnectionGater
}

// SenderEncoder allows sending functionality from libp2p as well as encoding for requests and responses.
type SenderEncoder interface {
    EncodingProvider
    Sender
}

// EncodingProvider provides p2p network encoding.
type EncodingProvider interface {
    Encoding() encoder.NetworkEncoding
@@ -29,7 +29,7 @@ func logIPAddr(id peer.ID, addrs ...ma.Multiaddr) {

func logExternalIPAddr(id peer.ID, addr string, port uint) {
    if addr != "" {
        multiAddr, err := multiAddressBuilder(addr, port)
        multiAddr, err := MultiAddressBuilder(addr, port)
        if err != nil {
            log.WithError(err).Error("Could not create multiaddress")
            return
@@ -16,10 +16,22 @@ import (
    "github.com/prysmaticlabs/prysm/v3/runtime/version"
)

// MultiAddressBuilder takes in an ip address string and port to produce a go multiaddr format.
func MultiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
    parsedIP := net.ParseIP(ipAddr)
    if parsedIP.To4() == nil && parsedIP.To16() == nil {
        return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
    }
    if parsedIP.To4() != nil {
        return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
    }
    return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port))
}

// buildOptions for the libp2p host.
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Option {
    cfg := s.cfg
    listen, err := multiAddressBuilder(ip.String(), cfg.TCPPort)
    listen, err := MultiAddressBuilder(ip.String(), cfg.TCPPort)
    if err != nil {
        log.WithError(err).Fatal("Failed to p2p listen")
    }

@@ -27,7 +39,7 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
    if net.ParseIP(cfg.LocalIP) == nil {
        log.Fatalf("Invalid local ip provided: %s", cfg.LocalIP)
    }
    listen, err = multiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
    listen, err = MultiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
    if err != nil {
        log.WithError(err).Fatal("Failed to p2p listen")
    }

@@ -65,7 +77,7 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
    }
    if cfg.HostAddress != "" {
        options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
            external, err := multiAddressBuilder(cfg.HostAddress, cfg.TCPPort)
            external, err := MultiAddressBuilder(cfg.HostAddress, cfg.TCPPort)
            if err != nil {
                log.WithError(err).Error("Unable to create external multiaddress")
            } else {

@@ -90,17 +102,6 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
    return options
}

func multiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
    parsedIP := net.ParseIP(ipAddr)
    if parsedIP.To4() == nil && parsedIP.To16() == nil {
        return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
    }
    if parsedIP.To4() != nil {
        return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
    }
    return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port))
}

func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (ma.Multiaddr, error) {
    parsedIP := net.ParseIP(ipAddr)
    if parsedIP.To4() == nil && parsedIP.To16() == nil {
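MultiAddressBuilder is the exported form of the former multiAddressBuilder helper, turning an IP string and TCP port into an /ip4 or /ip6 multiaddr. A small usage sketch with illustrative addresses:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
)

func main() {
	// IPv4 input yields an /ip4/.../tcp/... multiaddr.
	addr, err := p2p.MultiAddressBuilder("192.0.2.10", 13000)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(addr.String()) // /ip4/192.0.2.10/tcp/13000

	// IPv6 input yields an /ip6/.../tcp/... multiaddr.
	addr6, err := p2p.MultiAddressBuilder("2001:db8::1", 13000)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(addr6.String()) // /ip6/2001:db8::1/tcp/13000
}
```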
@@ -15,6 +15,7 @@ import (
    mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/v3/config/params"
    ecdsaprysm "github.com/prysmaticlabs/prysm/v3/crypto/ecdsa"
    "github.com/prysmaticlabs/prysm/v3/network"
    "github.com/prysmaticlabs/prysm/v3/testing/assert"
    "github.com/prysmaticlabs/prysm/v3/testing/require"
)

@@ -89,7 +90,7 @@ func TestDefaultMultiplexers(t *testing.T) {
    var err error
    svc.privKey, err = privKey(svc.cfg)
    assert.NoError(t, err)
    ipAddr := ipAddr()
    ipAddr := network.IPAddr()
    opts := svc.buildOptions(ipAddr, svc.privKey)
    err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
    assert.NoError(t, err)
@@ -7,7 +7,10 @@ go_library(
        "status.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers",
    visibility = ["//beacon-chain:__subpackages__"],
    visibility = [
        "//beacon-chain:__subpackages__",
        "//cmd:__subpackages__",
    ],
    deps = [
        "//beacon-chain/p2p/peers/peerdata:go_default_library",
        "//beacon-chain/p2p/peers/scorers:go_default_library",
@@ -30,6 +30,7 @@ import (
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers/scorers"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types"
    "github.com/prysmaticlabs/prysm/v3/config/params"
    prysmnetwork "github.com/prysmaticlabs/prysm/v3/network"
    "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/metadata"
    "github.com/prysmaticlabs/prysm/v3/runtime"
    "github.com/prysmaticlabs/prysm/v3/time/slots"

@@ -107,7 +108,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {

    cfg.Discv5BootStrapAddr = dv5Nodes

    ipAddr := ipAddr()
    ipAddr := prysmnetwork.IPAddr()
    s.privKey, err = privKey(s.cfg)
    if err != nil {
        log.WithError(err).Error("Failed to generate p2p private key")

@@ -200,8 +201,8 @@ func (s *Service) Start() {
        }
    }

    if !s.cfg.NoDiscovery && !s.cfg.DisableDiscv5 {
        ipAddr := ipAddr()
    if !s.cfg.NoDiscovery {
        ipAddr := prysmnetwork.IPAddr()
        listener, err := s.startDiscoveryV5(
            ipAddr,
            s.privKey,

@@ -224,7 +225,7 @@ func (s *Service) Start() {
    s.started = true

    if len(s.cfg.StaticPeers) > 0 {
        addrs, err := peersFromStringAddrs(s.cfg.StaticPeers)
        addrs, err := PeersFromStringAddrs(s.cfg.StaticPeers)
        if err != nil {
            log.WithError(err).Error("Could not connect to static peer")
        }
@@ -11,6 +11,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types",
visibility = [
    "//beacon-chain:__subpackages__",
    "//cmd:__subpackages__",
    "//slasher/rpc:__pkg__",
    "//testing/util:__pkg__",
    "//validator/client:__pkg__",
@@ -19,7 +19,6 @@ import (
    "github.com/prysmaticlabs/prysm/v3/consensus-types/wrapper"
    ecdsaprysm "github.com/prysmaticlabs/prysm/v3/crypto/ecdsa"
    "github.com/prysmaticlabs/prysm/v3/io/file"
    "github.com/prysmaticlabs/prysm/v3/network"
    pb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/metadata"
    "github.com/sirupsen/logrus"

@@ -129,15 +128,6 @@ func metaDataFromConfig(cfg *Config) (metadata.Metadata, error) {
    return wrapper.WrappedMetadataV0(metaData), nil
}

// Retrieves an external ipv4 address and converts into a libp2p formatted value.
func ipAddr() net.IP {
    ip, err := network.ExternalIP()
    if err != nil {
        log.WithError(err).Fatal("Could not get IPv4 address")
    }
    return net.ParseIP(ip)
}

// Attempt to dial an address to verify its connectivity
func verifyConnectivity(addr string, port uint, protocol string) {
    if addr != "" {
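The removed package-private ipAddr() helper is replaced throughout by IPAddr() from the shared network package (imported as prysmnetwork in service.go). A minimal sketch of the replacement call, assuming IPAddr keeps the old helper's behavior of resolving the host's external IP into a net.IP:

```go
package main

import (
	"fmt"

	prysmnetwork "github.com/prysmaticlabs/prysm/v3/network"
)

func main() {
	// Stands in for the removed p2p-local ipAddr() helper.
	ip := prysmnetwork.IPAddr()
	fmt.Println(ip.String())
}
```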
@@ -6,12 +6,6 @@ datadir: /var/lib/prysm/beacon
# http-web3provider: ETH1 API endpoint, eg. http://localhost:8545 for a local geth service on the default port
http-web3provider: http://localhost:8545

# fallback-web3provider: List of backup ETH1 API endpoints, used if above is not working
# For example:
# fallback-web3provider:
# - https://mainnet.infura.io/v3/YOUR-PROJECT-ID
# - https://eth-mainnet.alchemyapi.io/v2/YOUR-PROJECT-ID


# Optional tuning parameters
# For full list, see https://docs.prylabs.network/docs/prysm-usage/parameters
@@ -41,6 +41,29 @@ func wrapFeeRecipientsArray(
    return true, nil
}

// https://ethereum.github.io/beacon-APIs/#/Validator/registerValidator expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct.
func wrapSignedValidatorRegistrationsArray(
    endpoint *apimiddleware.Endpoint,
    _ http.ResponseWriter,
    req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
    if _, ok := endpoint.PostRequest.(*signedValidatorRegistrationsRequestJson); !ok {
        return true, nil
    }
    registrations := make([]*signedValidatorRegistrationJson, 0)
    if err := json.NewDecoder(req.Body).Decode(&registrations); err != nil {
        return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
    }
    j := &signedValidatorRegistrationsRequestJson{Registrations: registrations}
    b, err := json.Marshal(j)
    if err != nil {
        return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
    }
    req.Body = io.NopCloser(bytes.NewReader(b))
    return true, nil
}

// https://ethereum.github.io/beacon-apis/#/Beacon/submitPoolAttestations expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapAttestationsArray(
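The hook above lets clients POST the spec-mandated top-level JSON array to /eth/v1/validator/register_validator while the middleware rewraps the body into a registrations-keyed object for the proto-based handler. A standalone sketch of that rewrapping step, independent of the Prysm middleware types and with placeholder hex values, is:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
)

type registration struct {
	Message   json.RawMessage `json:"message"`
	Signature string          `json:"signature"`
}

type wrappedRegistrations struct {
	Registrations []registration `json:"registrations"`
}

func main() {
	// Body as the beacon API specifies it: a top-level array.
	body := io.NopCloser(bytes.NewReader([]byte(
		`[{"message":{"fee_recipient":"0xabc...","gas_limit":"30000000","timestamp":"1659043222","pubkey":"0xdef..."},"signature":"0x123..."}]`)))

	var regs []registration
	if err := json.NewDecoder(body).Decode(&regs); err != nil {
		panic(err)
	}

	// Re-encode with the wrapper object the proto-based handler expects.
	wrapped, err := json.Marshal(wrappedRegistrations{Registrations: regs})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(wrapped))
}
```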
@@ -68,6 +68,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
        "/eth/v1/validator/sync_committee_contribution",
        "/eth/v1/validator/contribution_and_proofs",
        "/eth/v1/validator/prepare_beacon_proposer",
        "/eth/v1/validator/register_validator",
    }
}

@@ -268,6 +269,11 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
        endpoint.Hooks = apimiddleware.HookCollection{
            OnPreDeserializeRequestBodyIntoContainer: wrapFeeRecipientsArray,
        }
    case "/eth/v1/validator/register_validator":
        endpoint.PostRequest = &signedValidatorRegistrationsRequestJson{}
        endpoint.Hooks = apimiddleware.HookCollection{
            OnPreDeserializeRequestBodyIntoContainer: wrapSignedValidatorRegistrationsArray,
        }
    default:
        return nil, errors.New("invalid path")
    }
@@ -773,6 +773,22 @@ type syncCommitteeContributionJson struct {
    Signature string `json:"signature" hex:"true"`
}

type validatorRegistrationJson struct {
    FeeRecipient string `json:"fee_recipient" hex:"true"`
    GasLimit string `json:"gas_limit"`
    Timestamp string `json:"timestamp"`
    Pubkey string `json:"pubkey" hex:"true"`
}

type signedValidatorRegistrationJson struct {
    Message validatorRegistrationJson `json:"message"`
    Signature string `json:"signature" hex:"true"`
}

type signedValidatorRegistrationsRequestJson struct {
    Registrations []*signedValidatorRegistrationJson `json:"registrations"`
}

//----------------
// SSZ
// ---------------
@@ -14,29 +14,6 @@ import (
    "google.golang.org/protobuf/types/known/emptypb"
)

// GetBeaconState returns the full beacon state for a given state ID.
func (ds *Server) GetBeaconState(ctx context.Context, req *ethpbv1.StateRequest) (*ethpbv1.BeaconStateResponse, error) {
    ctx, span := trace.StartSpan(ctx, "debug.GetBeaconState")
    defer span.End()

    beaconSt, err := ds.StateFetcher.State(ctx, req.StateId)
    if err != nil {
        return nil, helpers.PrepareStateFetchGRPCError(err)
    }

    if beaconSt.Version() != version.Phase0 {
        return nil, status.Error(codes.Internal, "State has incorrect type")
    }
    protoSt, err := migration.BeaconStateToProto(beaconSt)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not convert state to proto: %v", err)
    }

    return &ethpbv1.BeaconStateResponse{
        Data: protoSt,
    }, nil
}

// GetBeaconStateSSZ returns the SSZ-serialized version of the full beacon state object for given state ID.
func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateRequest) (*ethpbv2.SSZContainer, error) {
    ctx, span := trace.StartSpan(ctx, "debug.GetBeaconStateSSZ")

@@ -140,25 +117,6 @@ func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.BeaconSt
    return &ethpbv2.SSZContainer{Data: sszState, Version: ver}, nil
}

// ListForkChoiceHeads retrieves the leaves of the current fork choice tree.
func (ds *Server) ListForkChoiceHeads(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceHeadsResponse, error) {
    ctx, span := trace.StartSpan(ctx, "debug.ListForkChoiceHeads")
    defer span.End()

    headRoots, headSlots := ds.HeadFetcher.ChainHeads()
    resp := &ethpbv1.ForkChoiceHeadsResponse{
        Data: make([]*ethpbv1.ForkChoiceHead, len(headRoots)),
    }
    for i := range headRoots {
        resp.Data[i] = &ethpbv1.ForkChoiceHead{
            Root: headRoots[i][:],
            Slot: headSlots[i],
        }
    }

    return resp, nil
}

// ListForkChoiceHeadsV2 retrieves the leaves of the current fork choice tree.
func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (*ethpbv2.ForkChoiceHeadsResponse, error) {
    ctx, span := trace.StartSpan(ctx, "debug.ListForkChoiceHeadsV2")
@@ -17,21 +17,6 @@ import (
    "google.golang.org/protobuf/types/known/emptypb"
)

func TestGetBeaconState(t *testing.T) {
    fakeState, err := util.NewBeaconState()
    require.NoError(t, err)
    server := &Server{
        StateFetcher: &testutil.MockFetcher{
            BeaconState: fakeState,
        },
    }
    resp, err := server.GetBeaconState(context.Background(), &ethpbv1.StateRequest{
        StateId: make([]byte, 0),
    })
    require.NoError(t, err)
    assert.NotNil(t, resp)
}

func TestGetBeaconStateV2(t *testing.T) {
    ctx := context.Background()
    db := dbTest.SetupDB(t)

@@ -196,38 +181,6 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
    })
}

func TestListForkChoiceHeads(t *testing.T) {
    ctx := context.Background()

    expectedSlotsAndRoots := []struct {
        Slot types.Slot
        Root [32]byte
    }{{
        Slot: 0,
        Root: bytesutil.ToBytes32(bytesutil.PadTo([]byte("foo"), 32)),
    }, {
        Slot: 1,
        Root: bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)),
    }}

    server := &Server{
        HeadFetcher: &blockchainmock.ChainService{},
    }
    resp, err := server.ListForkChoiceHeads(ctx, &emptypb.Empty{})
    require.NoError(t, err)
    assert.Equal(t, 2, len(resp.Data))
    for _, sr := range expectedSlotsAndRoots {
        found := false
        for _, h := range resp.Data {
            if h.Slot == sr.Slot {
                found = true
                assert.DeepEqual(t, sr.Root[:], h.Root)
            }
        }
        assert.Equal(t, true, found, "Expected head not found")
    }
}

func TestListForkChoiceHeadsV2(t *testing.T) {
    ctx := context.Background()
@@ -10,6 +10,7 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
    "//beacon-chain/blockchain:go_default_library",
    "//beacon-chain/builder:go_default_library",
    "//beacon-chain/cache:go_default_library",
    "//beacon-chain/core/helpers:go_default_library",
    "//beacon-chain/core/transition:go_default_library",

@@ -49,6 +50,7 @@ go_test(
embed = [":go_default_library"],
deps = [
    "//beacon-chain/blockchain/testing:go_default_library",
    "//beacon-chain/builder/testing:go_default_library",
    "//beacon-chain/cache:go_default_library",
    "//beacon-chain/core/altair:go_default_library",
    "//beacon-chain/core/signing:go_default_library",
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/builder"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
|
||||
@@ -267,23 +268,6 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ProduceBlock requests the beacon node to produce a valid unsigned beacon block, which can then be signed by a proposer and submitted.
|
||||
func (vs *Server) ProduceBlock(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv1.ProduceBlockResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlock")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
block, err := vs.v1BeaconBlock(ctx, req)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get block: %v", err)
|
||||
}
|
||||
return ðpbv1.ProduceBlockResponse{Data: block}, nil
|
||||
}
|
||||
|
||||
// ProduceBlockV2 requests the beacon node to produce a valid unsigned beacon block, which can then be signed by a proposer and submitted.
|
||||
func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.ProduceBlockResponseV2, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlockV2")
|
||||
@@ -579,6 +563,38 @@ func (vs *Server) PrepareBeaconProposer(
|
||||
return &emptypb.Empty{}, nil
|
||||
}
|
||||
|
||||
// SubmitValidatorRegistration submits validator registrations.
func (vs *Server) SubmitValidatorRegistration(ctx context.Context, reg *ethpbv1.SubmitValidatorRegistrationsRequest) (*empty.Empty, error) {
	ctx, span := trace.StartSpan(ctx, "validator.SubmitValidatorRegistration")
	defer span.End()

	if vs.V1Alpha1Server.BlockBuilder == nil || !vs.V1Alpha1Server.BlockBuilder.Configured() {
		return &empty.Empty{}, status.Errorf(codes.Internal, "Could not register block builder: %v", builder.ErrNoBuilder)
	}
	var registrations []*ethpbalpha.SignedValidatorRegistrationV1
	for i, registration := range reg.Registrations {
		message := reg.Registrations[i].Message
		registrations = append(registrations, &ethpbalpha.SignedValidatorRegistrationV1{
			Message: &ethpbalpha.ValidatorRegistrationV1{
				FeeRecipient: message.FeeRecipient,
				GasLimit:     message.GasLimit,
				Timestamp:    message.Timestamp,
				Pubkey:       message.Pubkey,
			},
			Signature: registration.Signature,
		})
	}
	if len(registrations) == 0 {
		return &empty.Empty{}, status.Errorf(codes.InvalidArgument, "Validator registration request is empty")
	}

	if err := vs.V1Alpha1Server.BlockBuilder.RegisterValidator(ctx, registrations); err != nil {
		return nil, status.Errorf(codes.InvalidArgument, "Could not register block builder: %v", err)
	}

	return &empty.Empty{}, nil
}
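For context, a client-side call that feeds this handler typically goes through the REST gateway's register_validator route. The following standalone sketch posts one zero-valued registration; the host, port, and field values are placeholders and are not taken from this change.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
	"strings"
	"time"
)

type validatorRegistration struct {
	FeeRecipient string `json:"fee_recipient"`
	GasLimit     string `json:"gas_limit"`
	Timestamp    string `json:"timestamp"`
	Pubkey       string `json:"pubkey"`
}

type signedValidatorRegistration struct {
	Message   validatorRegistration `json:"message"`
	Signature string                `json:"signature"`
}

func main() {
	// Placeholder registration: zeroed fee recipient, pubkey, and signature.
	reg := signedValidatorRegistration{
		Message: validatorRegistration{
			FeeRecipient: "0x" + strings.Repeat("00", 20),
			GasLimit:     "30000000",
			Timestamp:    strconv.FormatInt(time.Now().Unix(), 10),
			Pubkey:       "0x" + strings.Repeat("00", 48),
		},
		Signature: "0x" + strings.Repeat("00", 96),
	}
	body, err := json.Marshal([]signedValidatorRegistration{reg})
	if err != nil {
		panic(err)
	}
	// Assumes a beacon node REST gateway on localhost:3500.
	resp, err := http.Post("http://localhost:3500/eth/v1/validator/register_validator",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}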
// ProduceAttestationData requests that the beacon node produces attestation data for
|
||||
// the requested committee index and slot based on the nodes current head.
|
||||
func (vs *Server) ProduceAttestationData(ctx context.Context, req *ethpbv1.ProduceAttestationDataRequest) (*ethpbv1.ProduceAttestationDataResponse, error) {
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
mockChain "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
|
||||
builderTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/builder/testing"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
|
||||
@@ -655,103 +656,6 @@ func TestSyncCommitteeDutiesLastValidEpoch(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestProduceBlock(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MainnetConfig())
|
||||
|
||||
beaconState, parentRoot, privKeys := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 64)
|
||||
|
||||
v1Alpha1Server := &v1alpha1validator.Server{
|
||||
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &mockChain.ChainService{},
|
||||
HeadUpdater: &mockChain.ChainService{},
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db),
|
||||
}
|
||||
|
||||
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
|
||||
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
|
||||
proposerSlashing, err := util.GenerateProposerSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i],
|
||||
i, /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
proposerSlashings[i] = proposerSlashing
|
||||
err = v1Alpha1Server.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
attSlashings := make([]*ethpbalpha.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
|
||||
for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
|
||||
attesterSlashing, err := util.GenerateAttesterSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i+params.BeaconConfig().MaxProposerSlashings],
|
||||
types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings), /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
attSlashings[i] = attesterSlashing
|
||||
err = v1Alpha1Server.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
v1Server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
V1Alpha1Server: v1Alpha1Server,
|
||||
}
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
req := ðpbv1.ProduceBlockRequest{
|
||||
Slot: 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
resp, err := v1Server.ProduceBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, req.Slot, resp.Data.Slot, "Expected block to have slot of 1")
|
||||
assert.DeepEqual(t, parentRoot[:], resp.Data.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, resp.Data.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, resp.Data.Body.Graffiti, "Expected block to have correct graffiti")
|
||||
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(resp.Data.Body.ProposerSlashings)))
|
||||
expectedPropSlashings := make([]*ethpbv1.ProposerSlashing, len(proposerSlashings))
|
||||
for i, slash := range proposerSlashings {
|
||||
expectedPropSlashings[i] = migration.V1Alpha1ProposerSlashingToV1(slash)
|
||||
}
|
||||
assert.DeepEqual(t, expectedPropSlashings, resp.Data.Body.ProposerSlashings)
|
||||
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(resp.Data.Body.AttesterSlashings)))
|
||||
expectedAttSlashings := make([]*ethpbv1.AttesterSlashing, len(attSlashings))
|
||||
for i, slash := range attSlashings {
|
||||
expectedAttSlashings[i] = migration.V1Alpha1AttSlashingToV1(slash)
|
||||
}
|
||||
assert.DeepEqual(t, expectedAttSlashings, resp.Data.Body.AttesterSlashings)
|
||||
}
|
||||
|
||||
func TestProduceBlock_SyncNotReady(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
chainService := &mockChain.ChainService{State: st}
|
||||
vs := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err = vs.ProduceBlock(context.Background(), ðpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
|
||||
}
|
||||
|
||||
func TestProduceBlockV2(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
@@ -3717,3 +3621,64 @@ func TestPrepareBeaconProposer(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_SubmitValidatorRegistrations(t *testing.T) {
|
||||
type args struct {
|
||||
request *ethpbv1.SubmitValidatorRegistrationsRequest
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "Happy Path",
|
||||
args: args{
|
||||
request: ðpbv1.SubmitValidatorRegistrationsRequest{
|
||||
Registrations: []*ethpbv1.SubmitValidatorRegistrationsRequest_SignedValidatorRegistration{
|
||||
{
|
||||
Message: ðpbv1.SubmitValidatorRegistrationsRequest_ValidatorRegistration{
|
||||
FeeRecipient: make([]byte, fieldparams.BLSPubkeyLength),
|
||||
GasLimit: 30000000,
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
Pubkey: make([]byte, fieldparams.BLSPubkeyLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: "",
|
||||
},
|
||||
{
|
||||
name: "Empty Request",
|
||||
args: args{
|
||||
request: ðpbv1.SubmitValidatorRegistrationsRequest{
|
||||
Registrations: []*ethpbv1.SubmitValidatorRegistrationsRequest_SignedValidatorRegistration{},
|
||||
},
|
||||
},
|
||||
wantErr: "Validator registration request is empty",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
v1Server := &v1alpha1validator.Server{
|
||||
BlockBuilder: &builderTest.MockBuilderService{
|
||||
HasConfigured: true,
|
||||
},
|
||||
BeaconDB: db,
|
||||
}
|
||||
server := &Server{
|
||||
V1Alpha1Server: v1Server,
|
||||
}
|
||||
_, err := server.SubmitValidatorRegistration(ctx, tt.args.request)
|
||||
if tt.wantErr != "" {
|
||||
require.ErrorContains(t, tt.wantErr, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,31 +33,6 @@ type blockContainer struct {
|
||||
isCanonical bool
|
||||
}
|
||||
|
||||
// ListBlocks retrieves blocks by root, slot, or epoch.
//
// The server may return multiple blocks in the case that a slot or epoch is
// provided as the filter criteria. The server may return an empty list when
// no blocks in their database match the filter criteria. This RPC should
// not return NOT_FOUND. Only one filter criteria should be used.
func (bs *Server) ListBlocks(
	ctx context.Context, req *ethpb.ListBlocksRequest,
) (*ethpb.ListBlocksResponse, error) {
	ctrs, numBlks, nextPageToken, err := bs.listBlocks(ctx, req)
	if err != nil {
		return nil, err
	}
	blkContainers, err := convertToProto(ctrs)
	if err != nil {
		return nil, err
	}

	return &ethpb.ListBlocksResponse{
		BlockContainers: blkContainers,
		TotalSize:       int32(numBlks),
		NextPageToken:   nextPageToken,
	}, nil
}
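A hypothetical caller of this RPC builds a request with exactly one filter set, mirroring the oneof wrappers used in the tests further below. A minimal gRPC sketch, assuming the generated v1alpha1 client and an insecure local connection:

package main

import (
	"context"
	"fmt"

	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"google.golang.org/grpc"
)

func main() {
	// Assumes a beacon node gRPC endpoint on localhost:4000.
	conn, err := grpc.Dial("localhost:4000", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	client := ethpb.NewBeaconChainClient(conn)

	// Request the blocks of epoch 5, three per page; only one filter may be set.
	req := &ethpb.ListBlocksRequest{
		QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 5},
		PageSize:    3,
	}
	resp, err := client.ListBeaconBlocks(context.Background(), req)
	if err != nil {
		panic(err)
	}
	fmt.Println("total blocks:", resp.TotalSize)
}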
// ListBeaconBlocks retrieves blocks by root, slot, or epoch.
|
||||
//
|
||||
// The server may return multiple blocks in the case that a slot or epoch is
|
||||
@@ -272,23 +247,6 @@ func (bs *Server) listBlocksForGenesis(ctx context.Context, _ *ethpb.ListBlocksR
|
||||
}}, 1, strconv.Itoa(0), nil
|
||||
}
|
||||
|
||||
func convertToProto(ctrs []blockContainer) ([]*ethpb.BeaconBlockContainer, error) {
|
||||
protoCtrs := make([]*ethpb.BeaconBlockContainer, len(ctrs))
|
||||
for i, c := range ctrs {
|
||||
phBlk, err := c.blk.PbPhase0Block()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get phase 0 block: %v", err)
|
||||
}
|
||||
copiedRoot := c.root
|
||||
protoCtrs[i] = ðpb.BeaconBlockContainer{
|
||||
Block: ðpb.BeaconBlockContainer_Phase0Block{Phase0Block: phBlk},
|
||||
BlockRoot: copiedRoot[:],
|
||||
Canonical: c.isCanonical,
|
||||
}
|
||||
}
|
||||
return protoCtrs, nil
|
||||
}
|
||||
|
||||
// GetChainHead retrieves information about the head of the beacon chain from
|
||||
// the view of the beacon chain node.
|
||||
//
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
|
||||
dbTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
|
||||
v1 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/v1"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
@@ -31,344 +30,6 @@ import (
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
func TestServer_ListBlocks_NoResults(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
wanted := ðpb.ListBlocksResponse{
|
||||
BlockContainers: make([]*ethpb.BeaconBlockContainer, 0),
|
||||
TotalSize: int32(0),
|
||||
NextPageToken: strconv.Itoa(0),
|
||||
}
|
||||
res, err := bs.ListBlocks(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Slot{
|
||||
Slot: 0,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(wanted, res) {
|
||||
t.Errorf("Wanted %v, received %v", wanted, res)
|
||||
}
|
||||
res, err = bs.ListBlocks(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Slot{
|
||||
Slot: 0,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(wanted, res) {
|
||||
t.Errorf("Wanted %v, received %v", wanted, res)
|
||||
}
|
||||
res, err = bs.ListBlocks(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Root{
|
||||
Root: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(wanted, res) {
|
||||
t.Errorf("Wanted %v, received %v", wanted, res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_ListBlocks_Genesis(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
// Should throw an error if no genesis block is found.
|
||||
_, err := bs.ListBlocks(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Genesis{
|
||||
Genesis: true,
|
||||
},
|
||||
})
|
||||
require.ErrorContains(t, "Could not find genesis", err)
|
||||
|
||||
// Should return the proper genesis block if it exists.
|
||||
parentRoot := [32]byte{'a'}
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
root, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, db, blk)
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
wanted := ðpb.ListBlocksResponse{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainer{
|
||||
{
|
||||
Block: ðpb.BeaconBlockContainer_Phase0Block{Phase0Block: blk},
|
||||
BlockRoot: root[:],
|
||||
Canonical: true,
|
||||
},
|
||||
},
|
||||
NextPageToken: "0",
|
||||
TotalSize: 1,
|
||||
}
|
||||
res, err := bs.ListBlocks(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Genesis{
|
||||
Genesis: true,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(wanted, res) {
|
||||
t.Errorf("Wanted %v, received %v", wanted, res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_ListBlocks_Genesis_MultiBlocks(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
// Should return the proper genesis block if it exists.
|
||||
parentRoot := [32]byte{1, 2, 3}
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
root, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, db, blk)
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
|
||||
count := types.Slot(100)
|
||||
blks := make([]interfaces.SignedBeaconBlock, count)
|
||||
for i := types.Slot(0); i < count; i++ {
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = i
|
||||
require.NoError(t, err)
|
||||
blks[i], err = blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, db.SaveBlocks(ctx, blks))
|
||||
|
||||
// Should throw an error if more than one blk returned.
|
||||
_, err = bs.ListBlocks(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Genesis{
|
||||
Genesis: true,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestServer_ListBlocks_Pagination(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MinimalSpecConfig())
|
||||
|
||||
db := dbTest.SetupDB(t)
|
||||
chain := &chainMock.ChainService{
|
||||
CanonicalRoots: map[[32]byte]bool{},
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
count := types.Slot(100)
|
||||
blks := make([]interfaces.SignedBeaconBlock, count)
|
||||
blkContainers := make([]*ethpb.BeaconBlockContainer, count)
|
||||
for i := types.Slot(0); i < count; i++ {
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = i
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
chain.CanonicalRoots[root] = true
|
||||
blks[i], err = blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
blkContainers[i] = ðpb.BeaconBlockContainer{
|
||||
Block: ðpb.BeaconBlockContainer_Phase0Block{Phase0Block: b},
|
||||
BlockRoot: root[:],
|
||||
Canonical: true,
|
||||
}
|
||||
}
|
||||
require.NoError(t, db.SaveBlocks(ctx, blks))
|
||||
|
||||
orphanedBlk := util.NewBeaconBlock()
|
||||
orphanedBlk.Block.Slot = 300
|
||||
orphanedBlkRoot, err := orphanedBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, db, orphanedBlk)
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: db,
|
||||
CanonicalFetcher: chain,
|
||||
}
|
||||
|
||||
root6, err := blks[6].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
req *ethpb.ListBlocksRequest
|
||||
res *ethpb.ListBlocksResponse
|
||||
}{
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(0),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Slot{Slot: 5},
|
||||
PageSize: 3},
|
||||
res: ðpb.ListBlocksResponse{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainer{
|
||||
{
|
||||
Block: ðpb.BeaconBlockContainer_Phase0Block{
|
||||
Phase0Block: util.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: 5,
|
||||
},
|
||||
}),
|
||||
},
|
||||
BlockRoot: blkContainers[5].BlockRoot,
|
||||
Canonical: blkContainers[5].Canonical,
|
||||
},
|
||||
},
|
||||
NextPageToken: "",
|
||||
TotalSize: 1,
|
||||
},
|
||||
},
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(0),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Root{Root: root6[:]},
|
||||
PageSize: 3},
|
||||
res: ðpb.ListBlocksResponse{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainer{
|
||||
{
|
||||
Block: ðpb.BeaconBlockContainer_Phase0Block{
|
||||
Phase0Block: util.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: 6,
|
||||
},
|
||||
}),
|
||||
},
|
||||
BlockRoot: blkContainers[6].BlockRoot,
|
||||
Canonical: blkContainers[6].Canonical,
|
||||
},
|
||||
},
|
||||
TotalSize: 1,
|
||||
NextPageToken: strconv.Itoa(0),
|
||||
},
|
||||
},
|
||||
{req: ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Root{Root: root6[:]}},
|
||||
res: ðpb.ListBlocksResponse{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainer{
|
||||
{
|
||||
Block: ðpb.BeaconBlockContainer_Phase0Block{
|
||||
Phase0Block: util.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: 6,
|
||||
},
|
||||
}),
|
||||
},
|
||||
BlockRoot: blkContainers[6].BlockRoot,
|
||||
Canonical: blkContainers[6].Canonical,
|
||||
},
|
||||
},
|
||||
TotalSize: 1,
|
||||
NextPageToken: strconv.Itoa(0),
|
||||
},
|
||||
},
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(0),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Epoch{Epoch: 0},
|
||||
PageSize: 100},
|
||||
res: ðpb.ListBlocksResponse{
|
||||
BlockContainers: blkContainers[0:params.BeaconConfig().SlotsPerEpoch],
|
||||
NextPageToken: "",
|
||||
TotalSize: int32(params.BeaconConfig().SlotsPerEpoch)}},
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(1),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Epoch{Epoch: 5},
|
||||
PageSize: 3},
|
||||
res: ðpb.ListBlocksResponse{
|
||||
BlockContainers: blkContainers[43:46],
|
||||
NextPageToken: "2",
|
||||
TotalSize: int32(params.BeaconConfig().SlotsPerEpoch)}},
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(1),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Epoch{Epoch: 11},
|
||||
PageSize: 7},
|
||||
res: ðpb.ListBlocksResponse{
|
||||
BlockContainers: blkContainers[95:96],
|
||||
NextPageToken: "",
|
||||
TotalSize: int32(params.BeaconConfig().SlotsPerEpoch)}},
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(0),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Epoch{Epoch: 12},
|
||||
PageSize: 4},
|
||||
res: ðpb.ListBlocksResponse{
|
||||
BlockContainers: blkContainers[96:100],
|
||||
NextPageToken: "",
|
||||
TotalSize: int32(params.BeaconConfig().SlotsPerEpoch / 2)}},
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(0),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Slot{Slot: 300},
|
||||
PageSize: 3},
|
||||
res: ðpb.ListBlocksResponse{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainer{
|
||||
{
|
||||
Block: ðpb.BeaconBlockContainer_Phase0Block{
|
||||
Phase0Block: util.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: 300,
|
||||
},
|
||||
}),
|
||||
},
|
||||
BlockRoot: orphanedBlkRoot[:],
|
||||
Canonical: false,
|
||||
},
|
||||
},
|
||||
NextPageToken: "",
|
||||
TotalSize: 1}},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) {
|
||||
res, err := bs.ListBlocks(ctx, test.req)
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, res, test.res)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_ListBlocks_Errors(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bs := &Server{BeaconDB: db}
|
||||
exceedsMax := int32(cmd.Get().MaxRPCPageSize + 1)
|
||||
|
||||
wanted := fmt.Sprintf("Requested page size %d can not be greater than max size %d", exceedsMax, cmd.Get().MaxRPCPageSize)
|
||||
req := ðpb.ListBlocksRequest{PageToken: strconv.Itoa(0), PageSize: exceedsMax}
|
||||
_, err := bs.ListBlocks(ctx, req)
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
|
||||
wanted = "Must specify a filter criteria for fetching"
|
||||
req = ðpb.ListBlocksRequest{}
|
||||
_, err = bs.ListBlocks(ctx, req)
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
|
||||
req = ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Slot{Slot: 0}}
|
||||
res, err := bs.ListBlocks(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
|
||||
assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
|
||||
|
||||
req = ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Slot{}}
|
||||
res, err = bs.ListBlocks(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
|
||||
assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
|
||||
|
||||
req = ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
|
||||
res, err = bs.ListBlocks(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
|
||||
assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
|
||||
|
||||
req = ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
|
||||
res, err = bs.ListBlocks(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
|
||||
assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
|
||||
}
|
||||
|
||||
// ensures that if any of the checkpoints are zero-valued, an error will be generated without genesis being present
|
||||
func TestServer_GetChainHead_NoGenesis(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
@@ -1192,47 +853,3 @@ func runListBeaconBlocksPagination(t *testing.T, orphanedBlk interfaces.SignedBe
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_ListBeaconBlocks_Errors(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
exceedsMax := int32(cmd.Get().MaxRPCPageSize + 1)
|
||||
|
||||
wanted := fmt.Sprintf("Requested page size %d can not be greater than max size %d", exceedsMax, cmd.Get().MaxRPCPageSize)
|
||||
req := ðpb.ListBlocksRequest{PageToken: strconv.Itoa(0), PageSize: exceedsMax}
|
||||
_, err := bs.ListBlocks(ctx, req)
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
|
||||
wanted = "Must specify a filter criteria for fetching"
|
||||
req = ðpb.ListBlocksRequest{}
|
||||
_, err = bs.ListBeaconBlocks(ctx, req)
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
|
||||
req = ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Slot{Slot: 0}}
|
||||
res, err := bs.ListBeaconBlocks(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
|
||||
assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
|
||||
|
||||
req = ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Slot{}}
|
||||
res, err = bs.ListBeaconBlocks(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
|
||||
assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
|
||||
|
||||
req = ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
|
||||
res, err = bs.ListBeaconBlocks(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
|
||||
assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
|
||||
|
||||
req = ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
|
||||
res, err = bs.ListBeaconBlocks(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
|
||||
assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
|
||||
}
|
||||
|
||||
@@ -754,8 +754,6 @@ func (bs *Server) GetValidatorPerformance(
|
||||
beforeTransitionBalances := make([]uint64, 0, responseCap)
|
||||
afterTransitionBalances := make([]uint64, 0, responseCap)
|
||||
effectiveBalances := make([]uint64, 0, responseCap)
|
||||
inclusionSlots := make([]types.Slot, 0, responseCap)
|
||||
inclusionDistances := make([]types.Slot, 0, responseCap)
|
||||
correctlyVotedSource := make([]bool, 0, responseCap)
|
||||
correctlyVotedTarget := make([]bool, 0, responseCap)
|
||||
correctlyVotedHead := make([]bool, 0, responseCap)
|
||||
@@ -789,8 +787,6 @@ func (bs *Server) GetValidatorPerformance(
|
||||
|
||||
if headState.Version() == version.Phase0 {
|
||||
correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochAttester)
|
||||
inclusionSlots = append(inclusionSlots, summary.InclusionSlot)
|
||||
inclusionDistances = append(inclusionDistances, summary.InclusionDistance)
|
||||
} else {
|
||||
correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochSourceAttester)
|
||||
inactivityScores = append(inactivityScores, summary.InactivityScore)
|
||||
@@ -806,9 +802,7 @@ func (bs *Server) GetValidatorPerformance(
|
||||
BalancesBeforeEpochTransition: beforeTransitionBalances,
|
||||
BalancesAfterEpochTransition: afterTransitionBalances,
|
||||
MissingValidators: missingValidators,
|
||||
InclusionSlots: inclusionSlots, // Only populated in phase0
|
||||
InclusionDistances: inclusionDistances, // Only populated in phase 0
|
||||
InactivityScores: inactivityScores, // Only populated in Altair
|
||||
InactivityScores: inactivityScores, // Only populated in Altair
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1846,12 +1846,9 @@ func TestGetValidatorPerformance_OK(t *testing.T) {
|
||||
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
farFuture := params.BeaconConfig().FarFutureSlot
|
||||
want := ðpb.ValidatorPerformanceResponse{
|
||||
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
|
||||
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
|
||||
InclusionSlots: []types.Slot{farFuture, farFuture},
|
||||
InclusionDistances: []types.Slot{farFuture, farFuture},
|
||||
CorrectlyVotedSource: []bool{false, false},
|
||||
CorrectlyVotedTarget: []bool{false, false},
|
||||
CorrectlyVotedHead: []bool{false, false},
|
||||
@@ -1918,12 +1915,9 @@ func TestGetValidatorPerformance_Indices(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
_, err = precompute.ProcessRewardsAndPenaltiesPrecompute(c, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
|
||||
require.NoError(t, err)
|
||||
farFuture := params.BeaconConfig().FarFutureSlot
|
||||
want := ðpb.ValidatorPerformanceResponse{
|
||||
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
|
||||
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
|
||||
InclusionSlots: []types.Slot{farFuture, farFuture},
|
||||
InclusionDistances: []types.Slot{farFuture, farFuture},
|
||||
CorrectlyVotedSource: []bool{false, false},
|
||||
CorrectlyVotedTarget: []bool{false, false},
|
||||
CorrectlyVotedHead: []bool{false, false},
|
||||
@@ -1991,12 +1985,9 @@ func TestGetValidatorPerformance_IndicesPubkeys(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
_, err = precompute.ProcessRewardsAndPenaltiesPrecompute(c, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
|
||||
require.NoError(t, err)
|
||||
farFuture := params.BeaconConfig().FarFutureSlot
|
||||
want := ðpb.ValidatorPerformanceResponse{
|
||||
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
|
||||
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
|
||||
InclusionSlots: []types.Slot{farFuture, farFuture},
|
||||
InclusionDistances: []types.Slot{farFuture, farFuture},
|
||||
CorrectlyVotedSource: []bool{false, false},
|
||||
CorrectlyVotedTarget: []bool{false, false},
|
||||
CorrectlyVotedHead: []bool{false, false},
|
||||
@@ -2065,8 +2056,6 @@ func TestGetValidatorPerformanceAltair_OK(t *testing.T) {
|
||||
want := ðpb.ValidatorPerformanceResponse{
|
||||
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
|
||||
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
|
||||
InclusionSlots: nil,
|
||||
InclusionDistances: nil,
|
||||
CorrectlyVotedSource: []bool{false, false},
|
||||
CorrectlyVotedTarget: []bool{false, false},
|
||||
CorrectlyVotedHead: []bool{false, false},
|
||||
@@ -2135,8 +2124,6 @@ func TestGetValidatorPerformanceBellatrix_OK(t *testing.T) {
|
||||
want := ðpb.ValidatorPerformanceResponse{
|
||||
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
|
||||
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
|
||||
InclusionSlots: nil,
|
||||
InclusionDistances: nil,
|
||||
CorrectlyVotedSource: []bool{false, false},
|
||||
CorrectlyVotedTarget: []bool{false, false},
|
||||
CorrectlyVotedHead: []bool{false, false},
|
||||
|
||||
@@ -162,12 +162,6 @@ func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.SignedB
|
||||
return root[:], nil
|
||||
}
|
||||
|
||||
// SubmitValidatorRegistration submits validator registration.
|
||||
// Deprecated: Use SubmitValidatorRegistrations instead.
|
||||
func (vs *Server) SubmitValidatorRegistration(ctx context.Context, reg *ethpb.SignedValidatorRegistrationV1) (*emptypb.Empty, error) {
|
||||
return vs.SubmitValidatorRegistrations(ctx, ðpb.SignedValidatorRegistrationsV1{Messages: []*ethpb.SignedValidatorRegistrationV1{reg}})
|
||||
}
|
||||
|
||||
// SubmitValidatorRegistrations submits validator registrations.
|
||||
func (vs *Server) SubmitValidatorRegistrations(ctx context.Context, reg *ethpb.SignedValidatorRegistrationsV1) (*emptypb.Empty, error) {
|
||||
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
|
||||
|
||||
@@ -48,6 +48,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//cmd:__subpackages__",
|
||||
"//testing:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
const forkDigestLength = 4

// writes peer's current context for the expected payload to the stream.
func writeContextToStream(objCtx []byte, stream network.Stream, chain blockchain.ChainInfoFetcher) error {
func writeContextToStream(objCtx []byte, stream network.Stream, chain blockchain.ForkFetcher) error {
	// The rpc context for our v2 methods is the fork-digest of
	// the relevant payload. We write the associated fork-digest(context)
	// into the stream for the payload.
@@ -34,7 +34,7 @@ func writeContextToStream(objCtx []byte, stream network.Stream, chain blockchain
}

// reads any attached context-bytes to the payload.
func readContextFromStream(stream network.Stream, chain blockchain.ChainInfoFetcher) ([]byte, error) {
func readContextFromStream(stream network.Stream, chain blockchain.ForkFetcher) ([]byte, error) {
	rpcCtx, err := rpcContext(stream, chain)
	if err != nil {
		return nil, err
@@ -51,7 +51,7 @@ func readContextFromStream(stream network.Stream, chain blockchain.ChainInfoFetc
}

// retrieve expected context depending on rpc topic schema version.
func rpcContext(stream network.Stream, chain blockchain.ChainInfoFetcher) ([]byte, error) {
func rpcContext(stream network.Stream, chain blockchain.ForkFetcher) ([]byte, error) {
	_, _, version, err := p2p.TopicDeconstructor(string(stream.Protocol()))
	if err != nil {
		return nil, err
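The context bytes referred to above are a 4-byte fork digest. As a stdlib-only sketch of how such a digest is derived per the consensus spec (the production code goes through Prysm's SSZ and params helpers; the inputs below are illustrative):

package main

import (
	"crypto/sha256"
	"fmt"
)

// computeForkDigest returns the first 4 bytes of
// hash_tree_root(ForkData(currentVersion, genesisValidatorsRoot)).
// For a two-field container of Bytes4 and Bytes32, that hash tree root
// reduces to sha256(version padded to 32 bytes || genesisValidatorsRoot).
func computeForkDigest(currentVersion [4]byte, genesisValidatorsRoot [32]byte) [4]byte {
	var versionChunk [32]byte
	copy(versionChunk[:], currentVersion[:])

	h := sha256.New()
	h.Write(versionChunk[:])
	h.Write(genesisValidatorsRoot[:])
	root := h.Sum(nil)

	var digest [4]byte
	copy(digest[:], root[:4])
	return digest
}

func main() {
	// Illustrative inputs only: a zero fork version and a zero genesis validators root.
	var version [4]byte
	var genesisRoot [32]byte
	fmt.Printf("fork digest: %#x\n", computeForkDigest(version, genesisRoot))
}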
@@ -190,7 +190,7 @@ func (s *Service) writeBlockRangeToStream(ctx context.Context, startSlot, endSlo
|
||||
continue
|
||||
}
|
||||
if chunkErr := s.chunkBlockWriter(stream, b); chunkErr != nil {
|
||||
log.WithError(chunkErr).Error("Could not send a chunked response")
|
||||
log.WithError(chunkErr).Debug("Could not send a chunked response")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
tracing.AnnotateError(span, chunkErr)
|
||||
return chunkErr
|
||||
|
||||
@@ -64,7 +64,7 @@ func WriteBlockChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher
|
||||
|
||||
// ReadChunkedBlock handles each response chunk that is sent by the
|
||||
// peer and converts it into a beacon block.
|
||||
func ReadChunkedBlock(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, p2p p2p.P2P, isFirstChunk bool) (interfaces.SignedBeaconBlock, error) {
|
||||
func ReadChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2p p2p.EncodingProvider, isFirstChunk bool) (interfaces.SignedBeaconBlock, error) {
|
||||
// Handle deadlines differently for first chunk
|
||||
if isFirstChunk {
|
||||
return readFirstChunkedBlock(stream, chain, p2p)
|
||||
@@ -75,7 +75,7 @@ func ReadChunkedBlock(stream libp2pcore.Stream, chain blockchain.ChainInfoFetche
|
||||
|
||||
// readFirstChunkedBlock reads the first chunked block and applies the appropriate deadlines to
|
||||
// it.
|
||||
func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, p2p p2p.P2P) (interfaces.SignedBeaconBlock, error) {
|
||||
func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2p p2p.EncodingProvider) (interfaces.SignedBeaconBlock, error) {
|
||||
code, errMsg, err := ReadStatusCode(stream, p2p.Encoding())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -97,7 +97,7 @@ func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ChainInfoF
|
||||
|
||||
// readResponseChunk reads the response from the stream and decodes it into the
|
||||
// provided message type.
|
||||
func readResponseChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, p2p p2p.P2P) (interfaces.SignedBeaconBlock, error) {
|
||||
func readResponseChunk(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2p p2p.EncodingProvider) (interfaces.SignedBeaconBlock, error) {
|
||||
SetStreamReadDeadline(stream, respTimeout)
|
||||
code, errMsg, err := readStatusCodeNoDeadline(stream, p2p.Encoding())
|
||||
if err != nil {
|
||||
@@ -119,7 +119,7 @@ func readResponseChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetch
|
||||
return blk, err
|
||||
}
|
||||
|
||||
func extractBlockDataType(digest []byte, chain blockchain.ChainInfoFetcher) (interfaces.SignedBeaconBlock, error) {
|
||||
func extractBlockDataType(digest []byte, chain blockchain.ForkFetcher) (interfaces.SignedBeaconBlock, error) {
|
||||
if len(digest) == 0 {
|
||||
bFunc, ok := types.BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
if !ok {
|
||||
|
||||
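As a simplified picture of the chunk framing ReadChunkedBlock deals with, each response chunk starts with a one-byte result code followed by a length-prefixed payload; the real protocol additionally snappy-compresses the SSZ payload and, on v2 topics, prepends the 4 fork-digest context bytes. A stdlib-only sketch of that framing:

package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readChunk reads one response chunk: a result byte, a uvarint payload length,
// and then the payload itself. This mirrors the framing only; real chunks carry
// snappy-compressed SSZ and, on v2 topics, 4 context bytes before the length.
func readChunk(r *bufio.Reader) (code byte, payload []byte, err error) {
	code, err = r.ReadByte()
	if err != nil {
		return 0, nil, err
	}
	size, err := binary.ReadUvarint(r)
	if err != nil {
		return 0, nil, err
	}
	payload = make([]byte, size)
	if _, err = io.ReadFull(r, payload); err != nil {
		return 0, nil, err
	}
	return code, payload, nil
}

func main() {
	// Build a fake stream containing one successful chunk with a "hello" payload.
	var buf bytes.Buffer
	buf.WriteByte(0) // result code 0 = success
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], 5)
	buf.Write(lenBuf[:n])
	buf.WriteString("hello")

	code, payload, err := readChunk(bufio.NewReader(&buf))
	if err != nil {
		panic(err)
	}
	fmt.Printf("code=%d payload=%q\n", code, payload)
}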
@@ -25,7 +25,7 @@ type BeaconBlockProcessor func(block interfaces.SignedBeaconBlock) error
|
||||
|
||||
// SendBeaconBlocksByRangeRequest sends BeaconBlocksByRange and returns fetched blocks, if any.
|
||||
func SendBeaconBlocksByRangeRequest(
|
||||
ctx context.Context, chain blockchain.ChainInfoFetcher, p2pProvider p2p.P2P, pid peer.ID,
|
||||
ctx context.Context, chain blockchain.ForkFetcher, p2pProvider p2p.SenderEncoder, pid peer.ID,
|
||||
req *pb.BeaconBlocksByRangeRequest, blockProcessor BeaconBlockProcessor,
|
||||
) ([]interfaces.SignedBeaconBlock, error) {
|
||||
topic, err := p2p.TopicFromMessage(p2p.BeaconBlocksByRangeMessageName, slots.ToEpoch(chain.CurrentSlot()))
|
||||
|
||||
@@ -563,7 +563,7 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) {
|
||||
db, err := kv.NewKVStore(context.Background(), t.TempDir(), &kv.Config{})
|
||||
db, err := kv.NewKVStore(context.Background(), t.TempDir())
|
||||
require.NoError(t, err)
|
||||
bState, err := transition.GenesisBeaconState(context.Background(), nil, 0, ðpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -178,6 +178,7 @@ func (s *Service) Start() {
|
||||
s.processPendingBlocksQueue()
|
||||
s.processPendingAttsQueue()
|
||||
s.maintainPeerStatuses()
|
||||
s.resyncIfBehind()
|
||||
|
||||
// Update sync metrics.
|
||||
async.RunEvery(s.ctx, syncMetricsInterval, s.updateMetrics)
|
||||
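The async.RunEvery call above schedules the metrics update on a fixed interval bound to the service context; a stdlib-only sketch of that pattern (the interval and work function are placeholders):

package main

import (
	"context"
	"fmt"
	"time"
)

// runEvery invokes fn on each tick until ctx is cancelled.
func runEvery(ctx context.Context, period time.Duration, fn func()) {
	go func() {
		ticker := time.NewTicker(period)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				fn()
			case <-ctx.Done():
				return
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	runEvery(ctx, 100*time.Millisecond, func() { fmt.Println("updating metrics...") })
	<-ctx.Done()
}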
|
||||
@@ -13,6 +13,7 @@ go_library(
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
	"github.com/prysmaticlabs/prysm/v3/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v3/io/file"
	log "github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
)

@@ -64,7 +65,8 @@ func parseJWTSecretFromFile(c *cli.Context) ([]byte, error) {
}

func parseExecutionChainEndpoint(c *cli.Context) (string, error) {
	if c.String(flags.ExecutionEngineEndpoint.Name) == "" {
	aliasUsed := c.IsSet(flags.HTTPWeb3ProviderFlag.Name)
	if c.String(flags.ExecutionEngineEndpoint.Name) == "" && !aliasUsed {
		return "", fmt.Errorf(
			"you need to specify %s to provide a connection endpoint to an Ethereum execution client "+
				"for your Prysm beacon node. This is a requirement for running a node. You can read more about "+
@@ -73,5 +75,12 @@ func parseExecutionChainEndpoint(c *cli.Context) (string, error) {
			flags.ExecutionEngineEndpoint.Name,
		)
	}
	// If users only declare the deprecated flag without setting the execution engine
	// flag, we fallback to using the deprecated flag value.
	if aliasUsed && !c.IsSet(flags.ExecutionEngineEndpoint.Name) {
		log.Warnf("The %s flag has been deprecated and will be removed in a future release,"+
			"please use the execution endpoint flag instead %s", flags.HTTPWeb3ProviderFlag.Name, flags.ExecutionEngineEndpoint.Name)
		return c.String(flags.HTTPWeb3ProviderFlag.Name), nil
	}
	return c.String(flags.ExecutionEngineEndpoint.Name), nil
}
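The deprecated-alias fallback above, reduced to a self-contained urfave/cli sketch; the flag names and defaults here are illustrative stand-ins rather than the exact Prysm flag definitions.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.StringFlag{Name: "execution-endpoint", Value: "http://localhost:8551"},
			// Deprecated alias kept only for backwards compatibility.
			&cli.StringFlag{Name: "http-web3provider", Value: "http://localhost:8551", Hidden: true},
		},
		Action: func(c *cli.Context) error {
			endpoint := c.String("execution-endpoint")
			// If only the deprecated alias was set explicitly, fall back to its value.
			if c.IsSet("http-web3provider") && !c.IsSet("execution-endpoint") {
				log.Println("http-web3provider is deprecated; use execution-endpoint instead")
				endpoint = c.String("http-web3provider")
			}
			fmt.Println("using execution endpoint:", endpoint)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}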
@@ -32,6 +32,13 @@ var (
|
||||
Usage: "An execution client http endpoint. Can contain auth header as well in the format",
|
||||
Value: "http://localhost:8551",
|
||||
}
|
||||
// Deprecated: HTTPWeb3ProviderFlag is a deprecated flag and is an alias for the ExecutionEngineEndpoint flag.
|
||||
HTTPWeb3ProviderFlag = &cli.StringFlag{
|
||||
Name: "http-web3provider",
|
||||
Usage: "DEPRECATED: A mainchain web3 provider string http endpoint. Can contain auth header as well in the format --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Basic xxx\" for project secret (base64 encoded) and --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Bearer xxx\" for jwt use",
|
||||
Value: "http://localhost:8551",
|
||||
Hidden: true,
|
||||
}
|
||||
// ExecutionJWTSecretFlag provides a path to a file containing a hex-encoded string representing a 32 byte secret
|
||||
// used to authenticate with an execution node via HTTP. This is required if using an HTTP connection, otherwise all requests
|
||||
// to execution nodes for consensus-related calls will fail. This is not required if using an IPC connection.
|
||||
@@ -142,11 +149,6 @@ var (
|
||||
Usage: "The slot durations of when an archived state gets saved in the beaconDB.",
|
||||
Value: 2048,
|
||||
}
|
||||
// DisableDiscv5 disables running discv5.
|
||||
DisableDiscv5 = &cli.BoolFlag{
|
||||
Name: "disable-discv5",
|
||||
Usage: "Does not run the discoveryV5 dht.",
|
||||
}
|
||||
// BlockBatchLimit specifies the requested block batch size.
|
||||
BlockBatchLimit = &cli.IntFlag{
|
||||
Name: "block-batch-limit",
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
// GlobalFlags specifies all the global flags for the
|
||||
// beacon node.
|
||||
type GlobalFlags struct {
|
||||
DisableDiscv5 bool
|
||||
SubscribeToAllSubnets bool
|
||||
MinimumSyncPeers int
|
||||
MinimumPeersPerSubnet int
|
||||
@@ -39,7 +38,6 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
|
||||
log.Warn("Subscribing to All Attestation Subnets")
|
||||
cfg.SubscribeToAllSubnets = true
|
||||
}
|
||||
cfg.DisableDiscv5 = ctx.Bool(DisableDiscv5.Name)
|
||||
cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
|
||||
cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
|
||||
cfg.MinimumPeersPerSubnet = ctx.Int(MinPeersPerSubnet.Name)
|
||||
|
||||
@@ -38,6 +38,7 @@ import (
|
||||
var appFlags = []cli.Flag{
|
||||
flags.DepositContractFlag,
|
||||
flags.ExecutionEngineEndpoint,
|
||||
flags.HTTPWeb3ProviderFlag,
|
||||
flags.ExecutionJWTSecretFlag,
|
||||
flags.RPCHost,
|
||||
flags.RPCPort,
|
||||
@@ -51,7 +52,6 @@ var appFlags = []cli.Flag{
|
||||
flags.MinSyncPeers,
|
||||
flags.ContractDeploymentBlock,
|
||||
flags.SetGCPercent,
|
||||
flags.DisableDiscv5,
|
||||
flags.BlockBatchLimit,
|
||||
flags.BlockBatchLimitBurstFactor,
|
||||
flags.InteropMockEth1DataVotesFlag,
|
||||
@@ -121,7 +121,6 @@ var appFlags = []cli.Flag{
|
||||
cmd.AcceptTosFlag,
|
||||
cmd.RestoreSourceFileFlag,
|
||||
cmd.RestoreTargetDirFlag,
|
||||
cmd.BoltMMapInitialSizeFlag,
|
||||
cmd.ValidatorMonitorIndicesFlag,
|
||||
cmd.ApiTimeoutFlag,
|
||||
checkpoint.BlockPath,
|
||||
@@ -233,6 +232,13 @@ func startNode(ctx *cli.Context) error {
|
||||
return err
|
||||
}
|
||||
logrus.SetLevel(level)
|
||||
// Set libp2p logger to only panic logs for the info level.
|
||||
golog.SetAllLoggers(golog.LevelPanic)
|
||||
|
||||
if level == logrus.DebugLevel {
|
||||
// Set libp2p logger to error logs for the debug level.
|
||||
golog.SetAllLoggers(golog.LevelError)
|
||||
}
|
||||
if level == logrus.TraceLevel {
|
||||
// libp2p specific logging.
|
||||
golog.SetAllLoggers(golog.LevelDebug)
|
||||
|
||||
@@ -73,7 +73,6 @@ var appHelpFlagGroups = []flagGroup{
|
||||
cmd.AcceptTosFlag,
|
||||
cmd.RestoreSourceFileFlag,
|
||||
cmd.RestoreTargetDirFlag,
|
||||
cmd.BoltMMapInitialSizeFlag,
|
||||
cmd.ValidatorMonitorIndicesFlag,
|
||||
cmd.ApiTimeoutFlag,
|
||||
},
|
||||
@@ -108,10 +107,10 @@ var appHelpFlagGroups = []flagGroup{
|
||||
flags.GRPCGatewayPort,
|
||||
flags.GPRCGatewayCorsDomain,
|
||||
flags.ExecutionEngineEndpoint,
|
||||
flags.HTTPWeb3ProviderFlag,
|
||||
flags.ExecutionJWTSecretFlag,
|
||||
flags.SetGCPercent,
|
||||
flags.SlotsPerArchivedPoint,
|
||||
flags.DisableDiscv5,
|
||||
flags.BlockBatchLimit,
|
||||
flags.BlockBatchLimitBurstFactor,
|
||||
flags.EnableDebugRPCEndpoints,
|
||||
|
||||
@@ -243,12 +243,6 @@ var (
|
||||
Usage: "Target directory of the restored database",
|
||||
Value: DefaultDataDir(),
|
||||
}
|
||||
// BoltMMapInitialSizeFlag specifies the initial size in bytes of boltdb's mmap syscall.
|
||||
BoltMMapInitialSizeFlag = &cli.IntFlag{
|
||||
Name: "bolt-mmap-initial-size",
|
||||
Usage: "Specifies the size in bytes of bolt db's mmap syscall allocation",
|
||||
Value: 536870912, // 512 Mb as a default value.
|
||||
}
|
||||
// ApiTimeoutFlag specifies the timeout value for API requests in seconds. A timeout of zero means no timeout.
|
||||
ApiTimeoutFlag = &cli.IntFlag{
|
||||
Name: "api-timeout",
|
||||
|
||||
@@ -88,7 +88,7 @@ func ExpandSingleEndpointIfFile(ctx *cli.Context, flag *cli.StringFlag) error {
|
||||
default:
|
||||
web3endpoint, err := file.ExpandPath(ctx.String(flag.Name))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not expand path for %s", web3endpoint)
|
||||
return errors.Wrapf(err, "could not expand path for %s", ctx.String(flag.Name))
|
||||
}
|
||||
if err := ctx.Set(flag.Name, web3endpoint); err != nil {
|
||||
return errors.Wrapf(err, "could not set %s to %s", flag.Name, web3endpoint)
|
||||
@@ -96,31 +96,3 @@ func ExpandSingleEndpointIfFile(ctx *cli.Context, flag *cli.StringFlag) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExpandWeb3EndpointsIfFile expands the path for --fallback-web3provider if specified as a file.
|
||||
func ExpandWeb3EndpointsIfFile(ctx *cli.Context, flags *cli.StringSliceFlag) error {
|
||||
// Return early if no flag value is set.
|
||||
if !ctx.IsSet(flags.Name) {
|
||||
return nil
|
||||
}
|
||||
rawFlags := ctx.StringSlice(flags.Name)
|
||||
for i, rawValue := range rawFlags {
|
||||
switch {
|
||||
case strings.HasPrefix(rawValue, "http://"):
|
||||
case strings.HasPrefix(rawValue, "https://"):
|
||||
case strings.HasPrefix(rawValue, "ws://"):
|
||||
case strings.HasPrefix(rawValue, "wss://"):
|
||||
default:
|
||||
web3endpoint, err := file.ExpandPath(rawValue)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not expand path for %s", rawValue)
|
||||
}
|
||||
// Given that rawFlags is a pointer this will replace the unexpanded path
|
||||
// with the expanded one. Also there is no easy way to replace the string
|
||||
// slice flag value compared to other flag types. This is why we resort to
|
||||
// replacing it like this.
|
||||
rawFlags[i] = web3endpoint
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -111,43 +111,3 @@ func TestExpandSingleEndpointIfFile(t *testing.T) {
|
||||
require.NoError(t, ExpandSingleEndpointIfFile(context, HTTPWeb3ProviderFlag))
|
||||
require.Equal(t, curentdir+"/path.ipc", context.String(HTTPWeb3ProviderFlag.Name))
|
||||
}
|
||||
|
||||
func TestExpandWeb3EndpointsIfFile(t *testing.T) {
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
HTTPWeb3ProviderFlag := &cli.StringSliceFlag{Name: "fallback-web3provider", Value: cli.NewStringSlice()}
|
||||
set.Var(cli.NewStringSlice(), HTTPWeb3ProviderFlag.Name, "")
|
||||
context := cli.NewContext(&app, set, nil)
|
||||
// with nothing set
|
||||
require.NoError(t, ExpandWeb3EndpointsIfFile(context, HTTPWeb3ProviderFlag))
|
||||
require.DeepEqual(t, []string{}, context.StringSlice(HTTPWeb3ProviderFlag.Name))
|
||||
|
||||
// with url scheme
|
||||
require.NoError(t, context.Set(HTTPWeb3ProviderFlag.Name, "http://localhost:8545"))
|
||||
require.NoError(t, ExpandWeb3EndpointsIfFile(context, HTTPWeb3ProviderFlag))
|
||||
require.DeepEqual(t, []string{"http://localhost:8545"}, context.StringSlice(HTTPWeb3ProviderFlag.Name))
|
||||
|
||||
// reset context
|
||||
set = flag.NewFlagSet("test", 0)
|
||||
set.Var(cli.NewStringSlice(), HTTPWeb3ProviderFlag.Name, "")
|
||||
context = cli.NewContext(&app, set, nil)
|
||||
|
||||
// relative user home path
|
||||
usr, err := user.Current()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, context.Set(HTTPWeb3ProviderFlag.Name, "~/relative/path.ipc"))
|
||||
require.NoError(t, ExpandWeb3EndpointsIfFile(context, HTTPWeb3ProviderFlag))
|
||||
require.DeepEqual(t, []string{usr.HomeDir + "/relative/path.ipc"}, context.StringSlice(HTTPWeb3ProviderFlag.Name))
|
||||
|
||||
// reset context
|
||||
set = flag.NewFlagSet("test", 0)
|
||||
set.Var(cli.NewStringSlice(), HTTPWeb3ProviderFlag.Name, "")
|
||||
context = cli.NewContext(&app, set, nil)
|
||||
|
||||
// current dir path
|
||||
curentdir, err := os.Getwd()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, context.Set(HTTPWeb3ProviderFlag.Name, "./path.ipc"))
|
||||
require.NoError(t, ExpandWeb3EndpointsIfFile(context, HTTPWeb3ProviderFlag))
|
||||
require.DeepEqual(t, []string{curentdir + "/path.ipc"}, context.StringSlice(HTTPWeb3ProviderFlag.Name))
|
||||
}
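The expansion behavior exercised above (resolving "~/relative/path.ipc" and "./path.ipc" into absolute paths) can be approximated with the standard library alone; a small sketch, not the actual file.ExpandPath implementation:

package main

import (
	"fmt"
	"os"
	"os/user"
	"path/filepath"
	"strings"
)

// expandPath resolves a leading "~" to the current user's home directory and
// makes relative paths absolute, similar in spirit to file.ExpandPath.
func expandPath(p string) (string, error) {
	if p == "~" || strings.HasPrefix(p, "~/") {
		usr, err := user.Current()
		if err != nil {
			return "", err
		}
		p = filepath.Join(usr.HomeDir, strings.TrimPrefix(p, "~"))
	}
	return filepath.Abs(p)
}

func main() {
	for _, p := range []string{"~/relative/path.ipc", "./path.ipc"} {
		expanded, err := expandPath(p)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			continue
		}
		fmt.Println(p, "->", expanded)
	}
}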
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary")
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
|
||||
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle", "container_image")
|
||||
load("//tools:go_image.bzl", "go_image_alpine", "go_image_debug")
|
||||
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
@@ -8,11 +12,98 @@ go_library(
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"//cmd/prysmctl/checkpoint:go_default_library",
|
||||
"//cmd/prysmctl/p2p:go_default_library",
|
||||
"//cmd/prysmctl/testnet:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_image(
|
||||
name = "image",
|
||||
base = select({
|
||||
"//tools:base_image_alpine": "//tools:alpine_cc_image",
|
||||
"//tools:base_image_cc": "//tools:cc_image",
|
||||
"//conditions:default": "//tools:cc_image",
|
||||
}),
|
||||
binary = ":prysmctl",
|
||||
tags = ["manual"],
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
container_image(
|
||||
name = "image_with_creation_time",
|
||||
base = "image",
|
||||
stamp = True,
|
||||
tags = ["manual"],
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
container_bundle(
|
||||
name = "image_bundle",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/cmd/prysmctl:latest": ":image_with_creation_time",
|
||||
"gcr.io/prysmaticlabs/prysm/cmd/prysmctl:{DOCKER_TAG}": ":image_with_creation_time",
|
||||
},
|
||||
tags = ["manual"],
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
go_image_debug(
|
||||
name = "image_debug",
|
||||
image = ":image",
|
||||
tags = ["manual"],
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
container_bundle(
|
||||
name = "image_bundle_debug",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/cmd/prysmctl:latest-debug": ":image_debug",
|
||||
"gcr.io/prysmaticlabs/prysm/cmd/prysmctl:{DOCKER_TAG}-debug": ":image_debug",
|
||||
},
|
||||
tags = ["manual"],
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
go_image_alpine(
|
||||
name = "image_alpine",
|
||||
image = ":image",
|
||||
tags = ["manual"],
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
container_bundle(
|
||||
name = "image_bundle_alpine",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/cmd/prysmctl:latest-alpine": ":image_alpine",
|
||||
"gcr.io/prysmaticlabs/prysm/cmd/prysmctl:{DOCKER_TAG}-alpine": ":image_alpine",
|
||||
},
|
||||
tags = ["manual"],
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images",
|
||||
bundle = ":image_bundle",
|
||||
tags = ["manual"],
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images_debug",
|
||||
bundle = ":image_bundle_debug",
|
||||
tags = ["manual"],
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images_alpine",
|
||||
bundle = ":image_bundle_alpine",
|
||||
tags = ["manual"],
|
||||
visibility = ["//cmd/prysmctl:__pkg__"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "prysmctl",
|
||||
embed = [":go_default_library"],
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
	"os"

	"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/checkpoint"
	"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/p2p"
	"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/testnet"
	log "github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
)
@@ -22,4 +24,6 @@ func main() {

func init() {
	prysmctlCommands = append(prysmctlCommands, checkpoint.Commands...)
	prysmctlCommands = append(prysmctlCommands, testnet.Commands...)
	prysmctlCommands = append(prysmctlCommands, p2p.Commands...)
}
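The registration pattern used by prysmctl (each subpackage exporting a Commands slice that the root binary appends into one app) looks roughly like the following standalone sketch; the command names here are made up for illustration.

package main

import (
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// Each subpackage exposes a Commands slice that the root binary stitches together.
var checkpointCommands = []*cli.Command{{Name: "checkpoint-sync", Usage: "example checkpoint command"}}
var p2pCommands = []*cli.Command{{Name: "p2p", Usage: "example p2p command"}}

var rootCommands []*cli.Command

func init() {
	rootCommands = append(rootCommands, checkpointCommands...)
	rootCommands = append(rootCommands, p2pCommands...)
}

func main() {
	app := &cli.App{
		Name:     "prysmctl-sketch",
		Commands: rootCommands,
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}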
56	cmd/prysmctl/p2p/BUILD.bazel	Normal file
@@ -0,0 +1,56 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"client.go",
|
||||
"handler.go",
|
||||
"handshake.go",
|
||||
"log.go",
|
||||
"mock_chain.go",
|
||||
"p2p.go",
|
||||
"peers.go",
|
||||
"request_blocks.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/p2p",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//cmd:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//crypto/ecdsa:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/protocol/identify:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_core//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_core//crypto:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_core//host:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_core//network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_core//peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_core//protocol:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
],
|
||||
)
|
||||
230 cmd/prysmctl/p2p/client.go (Normal file)
@@ -0,0 +1,230 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p-core/crypto"
|
||||
"github.com/libp2p/go-libp2p-core/host"
|
||||
corenet "github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
"github.com/libp2p/go-libp2p-core/protocol"
|
||||
"github.com/libp2p/go-libp2p/p2p/protocol/identify"
|
||||
"github.com/libp2p/go-libp2p/p2p/security/noise"
|
||||
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/encoder"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/wrapper"
|
||||
ecdsaprysm "github.com/prysmaticlabs/prysm/v3/crypto/ecdsa"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
|
||||
"github.com/prysmaticlabs/prysm/v3/network"
|
||||
"github.com/prysmaticlabs/prysm/v3/network/forks"
|
||||
pb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/metadata"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
// A minimal client for peering with beacon nodes over libp2p and sending p2p RPC requests for data.
|
||||
type client struct {
|
||||
host host.Host
|
||||
meta metadata.Metadata
|
||||
beaconClient pb.BeaconChainClient
|
||||
nodeClient pb.NodeClient
|
||||
}
|
||||
|
||||
func newClient(beaconEndpoints []string, clientPort uint) (*client, error) {
|
||||
ipAdd := ipAddr()
|
||||
priv, err := privKey()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not set up p2p private key")
|
||||
}
|
||||
meta, err := readMetadata()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not set up p2p metadata")
|
||||
}
|
||||
listen, err := p2p.MultiAddressBuilder(ipAdd.String(), clientPort)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not set up listening multiaddr")
|
||||
}
|
||||
options := []libp2p.Option{
|
||||
privKeyOption(priv),
|
||||
libp2p.ListenAddrs(listen),
|
||||
libp2p.UserAgent(version.BuildData()),
|
||||
libp2p.Transport(tcp.NewTCPTransport),
|
||||
}
|
||||
options = append(options, libp2p.Security(noise.ID, noise.New))
|
||||
options = append(options, libp2p.Ping(false))
|
||||
h, err := libp2p.New(options...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not start libp2p")
|
||||
}
|
||||
h.RemoveStreamHandler(identify.IDDelta)
|
||||
if len(beaconEndpoints) == 0 {
|
||||
return nil, errors.New("no specified beacon API endpoints")
|
||||
}
|
||||
conn, err := grpc.Dial(beaconEndpoints[0], grpc.WithInsecure())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
beaconClient := pb.NewBeaconChainClient(conn)
|
||||
nodeClient := pb.NewNodeClient(conn)
|
||||
return &client{
|
||||
host: h,
|
||||
meta: meta,
|
||||
beaconClient: beaconClient,
|
||||
nodeClient: nodeClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *client) Close() {
|
||||
if err := c.host.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *client) Encoding() encoder.NetworkEncoding {
|
||||
return &encoder.SszNetworkEncoder{}
|
||||
}
|
||||
|
||||
func (c *client) MetadataSeq() uint64 {
|
||||
return c.meta.SequenceNumber()
|
||||
}
|
||||
|
||||
// Send a request to specific peer. The returned stream may be used for reading,
|
||||
// but has been closed for writing.
|
||||
// When done, the caller must Close() or Reset() on the stream.
|
||||
func (c *client) Send(
|
||||
ctx context.Context,
|
||||
message interface{},
|
||||
baseTopic string,
|
||||
pid peer.ID,
|
||||
) (corenet.Stream, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.Send")
|
||||
defer span.End()
|
||||
topic := baseTopic + c.Encoding().ProtocolSuffix()
|
||||
span.AddAttributes(trace.StringAttribute("topic", topic))
|
||||
|
||||
// Apply max dial timeout when opening a new stream.
|
||||
ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
stream, err := c.host.NewStream(ctx, pid, protocol.ID(topic))
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, "could not open new stream")
|
||||
}
|
||||
// do not encode anything if we are sending a metadata request
|
||||
if baseTopic != p2p.RPCMetaDataTopicV1 && baseTopic != p2p.RPCMetaDataTopicV2 {
|
||||
castedMsg, ok := message.(ssz.Marshaler)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("%T does not support the ssz marshaller interface", message)
|
||||
}
|
||||
if _, err := c.Encoding().EncodeWithMaxLength(stream, castedMsg); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
_err := stream.Reset()
|
||||
_ = _err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Close stream for writing.
|
||||
if err := stream.CloseWrite(); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
_err := stream.Reset()
|
||||
_ = _err
|
||||
return nil, errors.Wrap(err, "could not close write")
|
||||
}
|
||||
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
func (c *client) retrievePeerAddressesViaRPC(ctx context.Context, beaconEndpoints []string) ([]string, error) {
|
||||
if len(beaconEndpoints) == 0 {
|
||||
return nil, errors.New("no beacon RPC endpoints specified")
|
||||
}
|
||||
peers := make([]string, 0)
|
||||
for i := 0; i < len(beaconEndpoints); i++ {
|
||||
conn, err := grpc.Dial(beaconEndpoints[i], grpc.WithInsecure())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodeClient := pb.NewNodeClient(conn)
|
||||
hostData, err := nodeClient.GetHost(ctx, &emptypb.Empty{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(hostData.Addresses) == 0 {
|
||||
continue
|
||||
}
|
||||
peers = append(peers, hostData.Addresses[0]+"/p2p/"+hostData.PeerId)
|
||||
}
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
func (c *client) initializeMockChainService(ctx context.Context) (*mockChain, error) {
|
||||
genesisResp, err := c.nodeClient.GetGenesis(ctx, &emptypb.Empty{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currEpoch := slots.ToEpoch(slots.SinceGenesis(genesisResp.GenesisTime.AsTime()))
|
||||
currFork, err := forks.Fork(currEpoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &mockChain{
|
||||
genesisTime: genesisResp.GenesisTime.AsTime(),
|
||||
currentFork: currFork,
|
||||
genesisValsRoot: bytesutil.ToBytes32(genesisResp.GenesisValidatorsRoot),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieves an external ipv4 address and converts into a libp2p formatted value.
|
||||
func ipAddr() net.IP {
|
||||
ip, err := network.ExternalIP()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return net.ParseIP(ip)
|
||||
}
|
||||
|
||||
// Determines a private key for p2p networking from the p2p service's
|
||||
// configuration struct. If no key is found, it generates a new one.
|
||||
func privKey() (*ecdsa.PrivateKey, error) {
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ecdsaprysm.ConvertFromInterfacePrivKey(priv)
|
||||
}
|
||||
|
||||
// Adds a private key to the libp2p option if the option was provided.
|
||||
// If the private key file is missing or cannot be read, or if the
|
||||
// private key contents cannot be marshaled, an exception is thrown.
|
||||
func privKeyOption(privkey *ecdsa.PrivateKey) libp2p.Option {
|
||||
return func(cfg *libp2p.Config) error {
|
||||
ifaceKey, err := ecdsaprysm.ConvertToInterfacePrivkey(privkey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cfg.Apply(libp2p.Identity(ifaceKey))
|
||||
}
|
||||
}
|
||||
|
||||
func readMetadata() (metadata.Metadata, error) {
|
||||
metaData := &pb.MetaDataV1{
|
||||
SeqNumber: 0,
|
||||
Attnets: bitfield.NewBitvector64(),
|
||||
}
|
||||
return wrapper.WrappedMetadataV1(metaData), nil
|
||||
}
|
||||
102 cmd/prysmctl/p2p/handler.go (Normal file)
@@ -0,0 +1,102 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
|
||||
libp2pcore "github.com/libp2p/go-libp2p-core"
|
||||
corenet "github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/libp2p/go-libp2p-core/protocol"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types"
|
||||
)
|
||||
|
||||
type rpcHandler func(context.Context, interface{}, libp2pcore.Stream) error
|
||||
|
||||
// registerRPC for a given topic with an expected protobuf message type.
|
||||
func (c *client) registerRPCHandler(baseTopic string, handle rpcHandler) {
|
||||
topic := baseTopic + c.Encoding().ProtocolSuffix()
|
||||
c.host.SetStreamHandler(protocol.ID(topic), func(stream corenet.Stream) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
log.WithField("error", r).Error("Panic occurred")
|
||||
log.Errorf("%s", debug.Stack())
|
||||
}
|
||||
}()
|
||||
// Resetting after closing is a no-op so defer a reset in case something goes wrong.
|
||||
// It's up to the handler to Close the stream (send an EOF) if
|
||||
// it successfully writes a response. We don't blindly call
|
||||
// Close here because we may have only written a partial
|
||||
// response.
|
||||
defer func() {
|
||||
_err := stream.Reset()
|
||||
_ = _err
|
||||
}()
|
||||
|
||||
log.WithField("peer", stream.Conn().RemotePeer().Pretty()).WithField("topic", string(stream.Protocol()))
|
||||
|
||||
base, ok := p2p.RPCTopicMappings[baseTopic]
|
||||
if !ok {
|
||||
log.Errorf("Could not retrieve base message for topic %s", baseTopic)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(base)
|
||||
// Copy Base
|
||||
base = reflect.New(t)
|
||||
|
||||
// since metadata requests do not have any data in the payload, we
|
||||
// do not decode anything.
|
||||
if baseTopic == p2p.RPCMetaDataTopicV1 || baseTopic == p2p.RPCMetaDataTopicV2 {
|
||||
if err := handle(context.Background(), base, stream); err != nil {
|
||||
if err != p2ptypes.ErrWrongForkDigestVersion {
|
||||
log.WithError(err).Debug("Could not handle p2p RPC")
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Given we have an input argument that can be pointer or the actual object, this gives us
|
||||
// a way to check for its reflect.Kind and based on the result, we can decode
|
||||
// accordingly.
|
||||
if t.Kind() == reflect.Ptr {
|
||||
msg, ok := reflect.New(t.Elem()).Interface().(ssz.Unmarshaler)
|
||||
if !ok {
|
||||
log.Errorf("message of %T does not support marshaller interface", msg)
|
||||
return
|
||||
}
|
||||
if err := c.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
|
||||
// Debug logs for goodbye/status errors
|
||||
if strings.Contains(topic, p2p.RPCGoodByeTopicV1) || strings.Contains(topic, p2p.RPCStatusTopicV1) {
|
||||
log.WithError(err).Debug("Could not decode goodbye stream message")
|
||||
return
|
||||
}
|
||||
log.WithError(err).Debug("Could not decode stream message")
|
||||
return
|
||||
}
|
||||
if err := handle(context.Background(), msg, stream); err != nil {
|
||||
if err != p2ptypes.ErrWrongForkDigestVersion {
|
||||
log.WithError(err).Debug("Could not handle p2p RPC")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
nTyp := reflect.New(t)
|
||||
msg, ok := nTyp.Interface().(ssz.Unmarshaler)
|
||||
if !ok {
|
||||
log.Errorf("message of %T does not support marshaller interface", msg)
|
||||
return
|
||||
}
|
||||
if err := c.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
|
||||
log.WithError(err).Debug("Could not decode stream message")
|
||||
return
|
||||
}
|
||||
if err := handle(context.Background(), nTyp.Elem().Interface(), stream); err != nil {
|
||||
if err != p2ptypes.ErrWrongForkDigestVersion {
|
||||
log.WithError(err).Debug("Could not handle p2p RPC")
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
81 cmd/prysmctl/p2p/handshake.go (Normal file)
@@ -0,0 +1,81 @@
package p2p

import (
    "context"

    libp2pcore "github.com/libp2p/go-libp2p-core"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
    types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v3/network/forks"
    pb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v3/time/slots"
    "github.com/sirupsen/logrus"
    "google.golang.org/protobuf/types/known/emptypb"
)

var responseCodeSuccess = byte(0x00)

func (c *client) registerHandshakeHandlers() {
    c.registerRPCHandler(p2p.RPCPingTopicV1, c.pingHandler)
    c.registerRPCHandler(p2p.RPCStatusTopicV1, c.statusRPCHandler)
    c.registerRPCHandler(p2p.RPCGoodByeTopicV1, c.goodbyeHandler)
}

// pingHandler reads the incoming ping rpc message from the peer.
func (c *client) pingHandler(_ context.Context, _ interface{}, stream libp2pcore.Stream) error {
    defer closeStream(stream)
    if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
        return err
    }
    sq := types.SSZUint64(c.MetadataSeq())
    if _, err := c.Encoding().EncodeWithMaxLength(stream, &sq); err != nil {
        return err
    }
    return nil
}

func (c *client) goodbyeHandler(_ context.Context, _ interface{}, _ libp2pcore.Stream) error {
    return nil
}

// statusRPCHandler reads the incoming Status RPC from the peer and responds with our version of a status message.
// This handler will disconnect any peer that does not match our fork version.
func (c *client) statusRPCHandler(ctx context.Context, _ interface{}, stream libp2pcore.Stream) error {
    defer closeStream(stream)
    chainHead, err := c.beaconClient.GetChainHead(ctx, &emptypb.Empty{})
    if err != nil {
        return err
    }
    resp, err := c.nodeClient.GetGenesis(ctx, &emptypb.Empty{})
    if err != nil {
        return err
    }
    digest, err := forks.CreateForkDigest(resp.GenesisTime.AsTime(), resp.GenesisValidatorsRoot)
    if err != nil {
        return err
    }
    kindOfFork, err := forks.Fork(slots.ToEpoch(chainHead.HeadSlot))
    if err != nil {
        return err
    }
    log.WithFields(logrus.Fields{
        "genesisTime":  resp.GenesisTime.AsTime(),
        "forkDigest":   digest,
        "currentFork":  kindOfFork.CurrentVersion,
        "previousFork": kindOfFork.PreviousVersion,
    }).Info("Responding to status RPC handler")
    status := &pb.Status{
        ForkDigest:     digest[:],
        FinalizedRoot:  chainHead.FinalizedBlockRoot,
        FinalizedEpoch: chainHead.FinalizedEpoch,
        HeadRoot:       chainHead.HeadBlockRoot,
        HeadSlot:       chainHead.HeadSlot,
    }

    if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
        log.WithError(err).Debug("Could not write to stream")
        return err
    }
    _, err = c.Encoding().EncodeWithMaxLength(stream, status)
    return err
}
5 cmd/prysmctl/p2p/log.go (Normal file)
@@ -0,0 +1,5 @@
package p2p

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "prysmctl-p2p")
36 cmd/prysmctl/p2p/mock_chain.go (Normal file)
@@ -0,0 +1,36 @@
package p2p

import (
    "time"

    "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
    types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
    ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v3/time/slots"
)

type mockChain struct {
    currentFork     *ethpb.Fork
    genesisValsRoot [32]byte
    genesisTime     time.Time
}

func (m *mockChain) ForkChoicer() forkchoice.ForkChoicer {
    return nil
}

func (m *mockChain) CurrentFork() *ethpb.Fork {
    return m.currentFork
}

func (m *mockChain) GenesisValidatorsRoot() [32]byte {
    return m.genesisValsRoot
}

func (m *mockChain) GenesisTime() time.Time {
    return m.genesisTime
}

func (m *mockChain) CurrentSlot() types.Slot {
    return slots.SinceGenesis(m.genesisTime)
}
17 cmd/prysmctl/p2p/p2p.go (Normal file)
@@ -0,0 +1,17 @@
package p2p

import "github.com/urfave/cli/v2"

var Commands = []*cli.Command{
    {
        Name:  "p2p",
        Usage: "commands for interacting with beacon nodes via p2p",
        Subcommands: []*cli.Command{
            {
                Name:        "send",
                Usage:       "commands for sending p2p rpc requests to beacon nodes",
                Subcommands: []*cli.Command{requestBlocksCmd},
            },
        },
    },
}
28 cmd/prysmctl/p2p/peers.go (Normal file)
@@ -0,0 +1,28 @@
package p2p

import (
    "context"

    "github.com/libp2p/go-libp2p-core/peer"
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
)

func (c *client) connectToPeers(ctx context.Context, peerMultiaddrs ...string) error {
    peers, err := p2p.PeersFromStringAddrs(peerMultiaddrs)
    if err != nil {
        return err
    }
    addrInfos, err := peer.AddrInfosFromP2pAddrs(peers...)
    if err != nil {
        panic(err)
    }
    for _, info := range addrInfos {
        if info.ID == c.host.ID() {
            continue
        }
        if err := c.host.Connect(ctx, info); err != nil {
            return err
        }
    }
    return nil
}
221 cmd/prysmctl/p2p/request_blocks.go (Normal file)
@@ -0,0 +1,221 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
libp2pcore "github.com/libp2p/go-libp2p-core"
|
||||
corenet "github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/sync"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
pb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
var requestBlocksFlags = struct {
|
||||
Peers string
|
||||
ClientPort uint
|
||||
APIEndpoints string
|
||||
StartSlot uint64
|
||||
Count uint64
|
||||
Step uint64
|
||||
}{}
|
||||
|
||||
var requestBlocksCmd = &cli.Command{
|
||||
Name: "beacon-blocks-by-range",
|
||||
Usage: "Request a range of blocks from a beacon node via a p2p connection",
|
||||
Action: cliActionRequestBlocks,
|
||||
Flags: []cli.Flag{
|
||||
cmd.ChainConfigFileFlag,
|
||||
&cli.StringFlag{
|
||||
Name: "peer-multiaddrs",
|
||||
Usage: "comma-separated, peer multiaddr(s) to connect to for p2p requests",
|
||||
Destination: &requestBlocksFlags.Peers,
|
||||
Value: "",
|
||||
},
|
||||
&cli.UintFlag{
|
||||
Name: "client-port",
|
||||
Usage: "port to use for the client as a libp2p host",
|
||||
Destination: &requestBlocksFlags.ClientPort,
|
||||
Value: 13001,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "prysm-api-endpoints",
|
||||
Usage: "comma-separated, gRPC API endpoint(s) for Prysm beacon node(s)",
|
||||
Destination: &requestBlocksFlags.APIEndpoints,
|
||||
Value: "localhost:4000",
|
||||
},
|
||||
&cli.Uint64Flag{
|
||||
Name: "start-slot",
|
||||
Usage: "start slot for blocks by range request. If unset, will use start_slot(current_epoch-1)",
|
||||
Destination: &requestBlocksFlags.StartSlot,
|
||||
Value: 0,
|
||||
},
|
||||
&cli.Uint64Flag{
|
||||
Name: "count",
|
||||
Usage: "number of blocks to request, (default 32)",
|
||||
Destination: &requestBlocksFlags.Count,
|
||||
Value: 32,
|
||||
},
|
||||
&cli.Uint64Flag{
|
||||
Name: "step",
|
||||
Usage: "number of steps of blocks in the range request, (default 1)",
|
||||
Destination: &requestBlocksFlags.Step,
|
||||
Value: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func cliActionRequestBlocks(cliCtx *cli.Context) error {
|
||||
if cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
|
||||
chainConfigFileName := cliCtx.String(cmd.ChainConfigFileFlag.Name)
|
||||
if err := params.LoadChainConfigFile(chainConfigFileName, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
p2ptypes.InitializeDataMaps()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
allAPIEndpoints := make([]string, 0)
|
||||
if requestBlocksFlags.APIEndpoints != "" {
|
||||
allAPIEndpoints = strings.Split(requestBlocksFlags.APIEndpoints, ",")
|
||||
}
|
||||
var err error
|
||||
c, err := newClient(allAPIEndpoints, requestBlocksFlags.ClientPort)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
allPeers := make([]string, 0)
|
||||
if requestBlocksFlags.Peers != "" {
|
||||
allPeers = strings.Split(requestBlocksFlags.Peers, ",")
|
||||
}
|
||||
if len(allPeers) == 0 {
|
||||
allPeers, err = c.retrievePeerAddressesViaRPC(ctx, allAPIEndpoints)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(allPeers) == 0 {
|
||||
return errors.New("no peers found")
|
||||
}
|
||||
log.WithField("peers", allPeers).Info("List of peers")
|
||||
chain, err := c.initializeMockChainService(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.registerHandshakeHandlers()
|
||||
|
||||
c.registerRPCHandler(p2p.RPCBlocksByRangeTopicV1, func(
|
||||
ctx context.Context, i interface{}, stream libp2pcore.Stream,
|
||||
) error {
|
||||
return nil
|
||||
})
|
||||
c.registerRPCHandler(p2p.RPCBlocksByRangeTopicV2, func(
|
||||
ctx context.Context, i interface{}, stream libp2pcore.Stream,
|
||||
) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
if err := c.connectToPeers(ctx, allPeers...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
startSlot := types.Slot(requestBlocksFlags.StartSlot)
|
||||
var headSlot *types.Slot
|
||||
if startSlot == 0 {
|
||||
headResp, err := c.beaconClient.GetChainHead(ctx, &emptypb.Empty{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
startSlot, err = slots.EpochStart(headResp.HeadEpoch.Sub(1))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headSlot = &headResp.HeadSlot
|
||||
}
|
||||
|
||||
// Submit requests.
|
||||
for _, pr := range c.host.Peerstore().Peers() {
|
||||
if pr.String() == c.host.ID().String() {
|
||||
continue
|
||||
}
|
||||
req := &pb.BeaconBlocksByRangeRequest{
|
||||
StartSlot: startSlot,
|
||||
Count: requestBlocksFlags.Count,
|
||||
Step: requestBlocksFlags.Step,
|
||||
}
|
||||
fields := logrus.Fields{
|
||||
"startSlot": startSlot,
|
||||
"count": requestBlocksFlags.Count,
|
||||
"step": requestBlocksFlags.Step,
|
||||
"peer": pr.String(),
|
||||
}
|
||||
if headSlot != nil {
|
||||
fields["headSlot"] = *headSlot
|
||||
}
|
||||
log.WithFields(fields).Info("Sending blocks by range p2p request to peer")
|
||||
start := time.Now()
|
||||
blocks, err := sync.SendBeaconBlocksByRangeRequest(
|
||||
ctx,
|
||||
chain,
|
||||
c,
|
||||
pr,
|
||||
req,
|
||||
nil, /* no extra block processing */
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
end := time.Since(start)
|
||||
totalExecutionBlocks := 0
|
||||
for _, blk := range blocks {
|
||||
exec, err := blk.Block().Body().Execution()
|
||||
switch {
|
||||
case errors.Is(err, consensusblocks.ErrUnsupportedGetter):
|
||||
continue
|
||||
case err != nil:
|
||||
log.WithError(err).Error("Could not read execution data from block body")
|
||||
continue
|
||||
default:
|
||||
}
|
||||
_, err = exec.Transactions()
|
||||
switch {
|
||||
case errors.Is(err, consensusblocks.ErrUnsupportedGetter):
|
||||
continue
|
||||
case err != nil:
|
||||
log.WithError(err).Error("Could not read transactions block execution payload")
|
||||
continue
|
||||
default:
|
||||
}
|
||||
totalExecutionBlocks++
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"numBlocks": len(blocks),
|
||||
"peer": pr.String(),
|
||||
"timeFromSendingToProcessingResponse": end,
|
||||
"totalBlocksWithExecutionPayloads": totalExecutionBlocks,
|
||||
}).Info("Received blocks from peer")
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func closeStream(stream corenet.Stream) {
|
||||
if err := stream.Close(); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
34 cmd/prysmctl/testnet/BUILD.bazel (Normal file)
@@ -0,0 +1,34 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "generate_genesis.go",
        "testnet.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/testnet",
    visibility = ["//visibility:public"],
    deps = [
        "//config/params:go_default_library",
        "//io/file:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/interop:go_default_library",
        "@com_github_ghodss_yaml//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["generate_genesis_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//crypto/bls:go_default_library",
        "//runtime/interop:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
)
265 cmd/prysmctl/testnet/generate_genesis.go (Normal file)
@@ -0,0 +1,265 @@
|
||||
package testnet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/pkg/errors"
|
||||
fastssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/io/file"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/interop"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
generateGenesisStateFlags = struct {
|
||||
DepositJsonFile string
|
||||
ChainConfigFile string
|
||||
ConfigName string
|
||||
NumValidators uint64
|
||||
GenesisTime uint64
|
||||
OutputSSZ string
|
||||
OutputJSON string
|
||||
OutputYaml string
|
||||
}{}
|
||||
log = logrus.WithField("prefix", "genesis")
|
||||
outputSSZFlag = &cli.StringFlag{
|
||||
Name: "output-ssz",
|
||||
Destination: &generateGenesisStateFlags.OutputSSZ,
|
||||
Usage: "Output filename of the SSZ marshaling of the generated genesis state",
|
||||
Value: "",
|
||||
}
|
||||
outputYamlFlag = &cli.StringFlag{
|
||||
Name: "output-yaml",
|
||||
Destination: &generateGenesisStateFlags.OutputYaml,
|
||||
Usage: "Output filename of the YAML marshaling of the generated genesis state",
|
||||
Value: "",
|
||||
}
|
||||
outputJsonFlag = &cli.StringFlag{
|
||||
Name: "output-json",
|
||||
Destination: &generateGenesisStateFlags.OutputJSON,
|
||||
Usage: "Output filename of the JSON marshaling of the generated genesis state",
|
||||
Value: "",
|
||||
}
|
||||
generateGenesisStateCmd = &cli.Command{
|
||||
Name: "generate-genesis",
|
||||
Usage: "Generate a beacon chain genesis state",
|
||||
Action: cliActionGenerateGenesisState,
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "chain-config-file",
|
||||
Destination: &generateGenesisStateFlags.ChainConfigFile,
|
||||
Usage: "The path to a YAML file with chain config values",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "deposit-json-file",
|
||||
Destination: &generateGenesisStateFlags.DepositJsonFile,
|
||||
Usage: "Path to deposit_data.json file generated by the staking-deposit-cli tool for optionally specifying validators in genesis state",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "config-name",
|
||||
Usage: "Config kind to be used for generating the genesis state. Default: mainnet. Options include mainnet, interop, minimal, prater, ropsten, sepolia. --chain-config-file will override this flag.",
|
||||
Destination: &generateGenesisStateFlags.ConfigName,
|
||||
Value: params.MainnetName,
|
||||
},
|
||||
&cli.Uint64Flag{
|
||||
Name: "num-validators",
|
||||
Usage: "Number of validators to deterministically generate in the genesis state",
|
||||
Destination: &generateGenesisStateFlags.NumValidators,
|
||||
Required: true,
|
||||
},
|
||||
&cli.Uint64Flag{
|
||||
Name: "genesis-time",
|
||||
Destination: &generateGenesisStateFlags.GenesisTime,
|
||||
Usage: "Unix timestamp seconds used as the genesis time in the genesis state. If unset, defaults to now()",
|
||||
},
|
||||
outputSSZFlag,
|
||||
outputYamlFlag,
|
||||
outputJsonFlag,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// Represents a json object of hex string and uint64 values for
|
||||
// validators on Ethereum. This file can be generated using the official staking-deposit-cli.
|
||||
type depositDataJSON struct {
|
||||
PubKey string `json:"pubkey"`
|
||||
Amount uint64 `json:"amount"`
|
||||
WithdrawalCredentials string `json:"withdrawal_credentials"`
|
||||
DepositDataRoot string `json:"deposit_data_root"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
func cliActionGenerateGenesisState(cliCtx *cli.Context) error {
|
||||
if generateGenesisStateFlags.GenesisTime == 0 {
|
||||
log.Info("No genesis time specified, defaulting to now()")
|
||||
}
|
||||
outputJson := generateGenesisStateFlags.OutputJSON
|
||||
outputYaml := generateGenesisStateFlags.OutputYaml
|
||||
outputSSZ := generateGenesisStateFlags.OutputSSZ
|
||||
noOutputFlag := outputSSZ == "" && outputJson == "" && outputYaml == ""
|
||||
if noOutputFlag {
|
||||
return fmt.Errorf(
|
||||
"no %s, %s, %s flag(s) specified. At least one is required",
|
||||
outputJsonFlag.Name,
|
||||
outputYamlFlag.Name,
|
||||
outputSSZFlag.Name,
|
||||
)
|
||||
}
|
||||
if err := setGlobalParams(); err != nil {
|
||||
return fmt.Errorf("could not set config params: %v", err)
|
||||
}
|
||||
genesisState, err := generateGenesis(cliCtx.Context)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not generate genesis state: %v", err)
|
||||
}
|
||||
if outputJson != "" {
|
||||
if err := writeToOutputFile(outputJson, genesisState, json.Marshal); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if outputYaml != "" {
|
||||
if err := writeToOutputFile(outputJson, genesisState, yaml.Marshal); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if outputSSZ != "" {
|
||||
marshalFn := func(o interface{}) ([]byte, error) {
|
||||
marshaler, ok := o.(fastssz.Marshaler)
|
||||
if !ok {
|
||||
return nil, errors.New("not a marshaler")
|
||||
}
|
||||
return marshaler.MarshalSSZ()
|
||||
}
|
||||
if err := writeToOutputFile(outputSSZ, genesisState, marshalFn); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
log.Info("Command completed")
|
||||
return nil
|
||||
}
|
||||
|
||||
func setGlobalParams() error {
|
||||
chainConfigFile := generateGenesisStateFlags.ChainConfigFile
|
||||
if chainConfigFile != "" {
|
||||
log.Infof("Specified a chain config file: %s", chainConfigFile)
|
||||
return params.LoadChainConfigFile(chainConfigFile, nil)
|
||||
}
|
||||
cfg, err := params.ByName(generateGenesisStateFlags.ConfigName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to find config using name %s: %v", generateGenesisStateFlags.ConfigName, err)
|
||||
}
|
||||
return params.SetActive(cfg.Copy())
|
||||
}
|
||||
|
||||
func generateGenesis(ctx context.Context) (*ethpb.BeaconState, error) {
|
||||
genesisTime := generateGenesisStateFlags.GenesisTime
|
||||
numValidators := generateGenesisStateFlags.NumValidators
|
||||
depositJsonFile := generateGenesisStateFlags.DepositJsonFile
|
||||
if depositJsonFile != "" {
|
||||
expanded, err := file.ExpandPath(depositJsonFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inputJSON, err := os.Open(expanded) // #nosec G304
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := inputJSON.Close(); err != nil {
|
||||
log.WithError(err).Printf("Could not close file %s", depositJsonFile)
|
||||
}
|
||||
}()
|
||||
log.Printf("Generating genesis state from input JSON deposit data %s", depositJsonFile)
|
||||
return genesisStateFromJSONValidators(ctx, inputJSON, genesisTime)
|
||||
}
|
||||
if numValidators == 0 {
|
||||
return nil, fmt.Errorf(
|
||||
"expected --num-validators > 0 to have been provided",
|
||||
)
|
||||
}
|
||||
// If no JSON input is specified, we create the state deterministically from interop keys.
|
||||
genesisState, _, err := interop.GenerateGenesisState(ctx, genesisTime, numValidators)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return genesisState, err
|
||||
}
|
||||
|
||||
func genesisStateFromJSONValidators(ctx context.Context, r io.Reader, genesisTime uint64) (*ethpb.BeaconState, error) {
|
||||
enc, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var depositJSON []*depositDataJSON
|
||||
if err := json.Unmarshal(enc, &depositJSON); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
depositDataList := make([]*ethpb.Deposit_Data, len(depositJSON))
|
||||
depositDataRoots := make([][]byte, len(depositJSON))
|
||||
for i, val := range depositJSON {
|
||||
data, dataRootBytes, err := depositJSONToDepositData(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
depositDataList[i] = data
|
||||
depositDataRoots[i] = dataRootBytes
|
||||
}
|
||||
beaconState, _, err := interop.GenerateGenesisStateFromDepositData(ctx, genesisTime, depositDataList, depositDataRoots)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
func depositJSONToDepositData(input *depositDataJSON) (depositData *ethpb.Deposit_Data, dataRoot []byte, err error) {
|
||||
pubKeyBytes, err := hex.DecodeString(strings.TrimPrefix(input.PubKey, "0x"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
withdrawalbytes, err := hex.DecodeString(strings.TrimPrefix(input.WithdrawalCredentials, "0x"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
signatureBytes, err := hex.DecodeString(strings.TrimPrefix(input.Signature, "0x"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
dataRootBytes, err := hex.DecodeString(strings.TrimPrefix(input.DepositDataRoot, "0x"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
depositData = ðpb.Deposit_Data{
|
||||
PublicKey: pubKeyBytes,
|
||||
WithdrawalCredentials: withdrawalbytes,
|
||||
Amount: input.Amount,
|
||||
Signature: signatureBytes,
|
||||
}
|
||||
dataRoot = dataRootBytes
|
||||
return
|
||||
}
|
||||
|
||||
func writeToOutputFile(
|
||||
fPath string,
|
||||
data interface{},
|
||||
marshalFn func(o interface{}) ([]byte, error),
|
||||
) error {
|
||||
encoded, err := marshalFn(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := file.WriteFile(fPath, encoded); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Printf("Done writing genesis state to %s", fPath)
|
||||
return nil
|
||||
}
|
||||
@@ -1,7 +1,8 @@
|
||||
package main
|
||||
package testnet
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
@@ -17,8 +18,9 @@ func Test_genesisStateFromJSONValidators(t *testing.T) {
|
||||
jsonData := createGenesisDepositData(t, numKeys)
|
||||
jsonInput, err := json.Marshal(jsonData)
|
||||
require.NoError(t, err)
|
||||
ctx := context.Background()
|
||||
genesisState, err := genesisStateFromJSONValidators(
|
||||
bytes.NewReader(jsonInput), 0, /* genesis time defaults to time.Now() */
|
||||
ctx, bytes.NewReader(jsonInput), 0, /* genesis time defaults to time.Now() */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
for i, val := range genesisState.Validators {
|
||||
@@ -26,7 +28,7 @@ func Test_genesisStateFromJSONValidators(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func createGenesisDepositData(t *testing.T, numKeys int) []*DepositDataJSON {
|
||||
func createGenesisDepositData(t *testing.T, numKeys int) []*depositDataJSON {
|
||||
pubKeys := make([]bls.PublicKey, numKeys)
|
||||
privKeys := make([]bls.SecretKey, numKeys)
|
||||
for i := 0; i < numKeys; i++ {
|
||||
@@ -37,11 +39,11 @@ func createGenesisDepositData(t *testing.T, numKeys int) []*DepositDataJSON {
|
||||
}
|
||||
dataList, _, err := interop.DepositDataFromKeys(privKeys, pubKeys)
|
||||
require.NoError(t, err)
|
||||
jsonData := make([]*DepositDataJSON, numKeys)
|
||||
jsonData := make([]*depositDataJSON, numKeys)
|
||||
for i := 0; i < numKeys; i++ {
|
||||
dataRoot, err := dataList[i].HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
jsonData[i] = &DepositDataJSON{
|
||||
jsonData[i] = &depositDataJSON{
|
||||
PubKey: fmt.Sprintf("%#x", dataList[i].PublicKey),
|
||||
Amount: dataList[i].Amount,
|
||||
WithdrawalCredentials: fmt.Sprintf("%#x", dataList[i].WithdrawalCredentials),
|
||||
13 cmd/prysmctl/testnet/testnet.go (Normal file)
@@ -0,0 +1,13 @@
package testnet

import "github.com/urfave/cli/v2"

var Commands = []*cli.Command{
    {
        Name:  "testnet",
        Usage: "commands for dealing with Ethereum beacon chain testnets",
        Subcommands: []*cli.Command{
            generateGenesisStateCmd,
        },
    },
}
@@ -3,6 +3,7 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"time"
|
||||
@@ -332,7 +333,7 @@ var (
|
||||
// SuggestedFeeRecipientFlag defines the address of the fee recipient.
|
||||
SuggestedFeeRecipientFlag = &cli.StringFlag{
|
||||
Name: "suggested-fee-recipient",
|
||||
Usage: "Sets ALL validators' mapping to a suggested an eth address to receive gas fees when proposing a block." +
|
||||
Usage: "Sets ALL validators' mapping to a suggested eth address to receive gas fees when proposing a block." +
|
||||
" note that this is only a suggestion when integrating with a Builder API, which may choose to specify a different fee recipient as payment for the blocks it builds." +
|
||||
" For additional setting overrides use the --" + ProposerSettingsFlag.Name + " or --" + ProposerSettingsURLFlag.Name + " Flags. ",
|
||||
Value: params.BeaconConfig().EthBurnAddressHex,
|
||||
@@ -340,16 +341,17 @@ var (
|
||||
|
||||
// EnableBuilderFlag enables the periodic validator registration API calls that will update the custom builder with validator settings.
|
||||
EnableBuilderFlag = &cli.BoolFlag{
|
||||
Name: "enable-builder",
|
||||
Usage: "Enables Builder validator registration APIs for the validator client to update settings such as fee recipient and gas limit. Note* this flag is not required if using proposer settings config file",
|
||||
Value: false,
|
||||
Name: "enable-builder",
|
||||
Usage: "Enables Builder validator registration APIs for the validator client to update settings such as fee recipient and gas limit. Note* this flag is not required if using proposer settings config file",
|
||||
Value: false,
|
||||
Aliases: []string{"enable-validator-registration"},
|
||||
}
|
||||
|
||||
// BuilderGasLimitFlag defines the gas limit for the builder to use for constructing a payload.
|
||||
BuilderGasLimitFlag = &cli.IntFlag{
|
||||
BuilderGasLimitFlag = &cli.StringFlag{
|
||||
Name: "suggested-gas-limit",
|
||||
Usage: "Sets gas limit for the builder to use for constructing a payload for all the validators",
|
||||
Value: int(params.BeaconConfig().DefaultBuilderGasLimit),
|
||||
Value: fmt.Sprint(params.BeaconConfig().DefaultBuilderGasLimit),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@@ -99,7 +99,6 @@ var appFlags = []cli.Flag{
|
||||
cmd.ConfigFileFlag,
|
||||
cmd.ChainConfigFileFlag,
|
||||
cmd.GrpcMaxCallRecvMsgSizeFlag,
|
||||
cmd.BoltMMapInitialSizeFlag,
|
||||
cmd.ApiTimeoutFlag,
|
||||
debug.PProfFlag,
|
||||
debug.PProfAddrFlag,
|
||||
|
||||
@@ -65,7 +65,6 @@ var appHelpFlagGroups = []flagGroup{
|
||||
cmd.ChainConfigFileFlag,
|
||||
cmd.GrpcMaxCallRecvMsgSizeFlag,
|
||||
cmd.AcceptTosFlag,
|
||||
cmd.BoltMMapInitialSizeFlag,
|
||||
cmd.ApiTimeoutFlag,
|
||||
},
|
||||
},
|
||||
|
||||
@@ -45,7 +45,6 @@ type Flags struct {
|
||||
WriteSSZStateTransitions bool // WriteSSZStateTransitions to tmp directory.
|
||||
EnablePeerScorer bool // EnablePeerScorer enables experimental peer scoring in p2p.
|
||||
WriteWalletPasswordOnWebOnboarding bool // WriteWalletPasswordOnWebOnboarding writes the password to disk after Prysm web signup.
|
||||
DisableAttestingHistoryDBCache bool // DisableAttestingHistoryDBCache for the validator client increases disk reads/writes.
|
||||
EnableDoppelGanger bool // EnableDoppelGanger enables doppelganger protection on startup for the validator.
|
||||
EnableHistoricalSpaceRepresentation bool // EnableHistoricalSpaceRepresentation enables the saving of registry validators in separate buckets to save space
|
||||
// Logging related toggles.
|
||||
@@ -264,10 +263,6 @@ func ConfigureValidator(ctx *cli.Context) error {
|
||||
logEnabled(writeWalletPasswordOnWebOnboarding)
|
||||
cfg.WriteWalletPasswordOnWebOnboarding = true
|
||||
}
|
||||
if ctx.Bool(disableAttestingHistoryDBCache.Name) {
|
||||
logDisabled(disableAttestingHistoryDBCache)
|
||||
cfg.DisableAttestingHistoryDBCache = true
|
||||
}
|
||||
if ctx.Bool(attestTimely.Name) {
|
||||
logEnabled(attestTimely)
|
||||
cfg.AttestTimely = true
|
||||
|
||||
@@ -12,8 +12,84 @@ var (
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedBackupWebHookFlag = &cli.BoolFlag{
|
||||
Name: "enable-db-backup-webhook",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedBoltMmapFlag = &cli.StringFlag{
|
||||
Name: "bolt-mmap-initial-size",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedDisableDiscV5Flag = &cli.BoolFlag{
|
||||
Name: "disable-discv5",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedDisableAttHistoryCacheFlag = &cli.BoolFlag{
|
||||
Name: "disable-attesting-history-db-cache",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedEnableVectorizedHtr = &cli.BoolFlag{
|
||||
Name: "enable-vectorized-htr",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedEnablePeerScorer = &cli.BoolFlag{
|
||||
Name: "enable-peer-scorer",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedEnableForkchoiceDoublyLinkedTree = &cli.BoolFlag{
|
||||
Name: "enable-forkchoice-doubly-linked-tree",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedDutyCountdown = &cli.BoolFlag{
|
||||
Name: "enable-duty-count-down",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedHeadSync = &cli.BoolFlag{
|
||||
Name: "head-sync",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedGossipBatchAggregation = &cli.BoolFlag{
|
||||
Name: "enable-gossip-batch-aggregation",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedEnableLargerGossipHistory = &cli.BoolFlag{
|
||||
Name: "enable-larger-gossip-history",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedFallbackProvider = &cli.StringFlag{
|
||||
Name: "fallback-web3provider",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
)
|
||||
|
||||
// Deprecated flags for both the beacon node and validator client.
|
||||
var deprecatedFlags = []cli.Flag{
|
||||
exampleDeprecatedFeatureFlag,
|
||||
deprecatedBoltMmapFlag,
|
||||
deprecatedDisableDiscV5Flag,
|
||||
deprecatedDisableAttHistoryCacheFlag,
|
||||
deprecatedEnableVectorizedHtr,
|
||||
deprecatedEnablePeerScorer,
|
||||
deprecatedEnableForkchoiceDoublyLinkedTree,
|
||||
deprecatedDutyCountdown,
|
||||
deprecatedHeadSync,
|
||||
deprecatedGossipBatchAggregation,
|
||||
deprecatedEnableLargerGossipHistory,
|
||||
deprecatedFallbackProvider,
|
||||
}
|
||||
|
||||
var deprecatedBeaconFlags = []cli.Flag{
|
||||
deprecatedBackupWebHookFlag,
|
||||
}
|
||||
|
||||
@@ -55,11 +55,6 @@ var (
|
||||
Usage: "(Danger): Writes the wallet password to the wallet directory on completing Prysm web onboarding. " +
|
||||
"We recommend against this flag unless you are an advanced user.",
|
||||
}
|
||||
disableAttestingHistoryDBCache = &cli.BoolFlag{
|
||||
Name: "disable-attesting-history-db-cache",
|
||||
Usage: "(Danger): Disables the cache for attesting history in the validator DB, greatly increasing " +
|
||||
"disk reads and writes as well as increasing time required for attestations to be produced",
|
||||
}
|
||||
dynamicKeyReloadDebounceInterval = &cli.DurationFlag{
|
||||
Name: "dynamic-key-reload-debounce-interval",
|
||||
Usage: "(Advanced): Specifies the time duration the validator waits to reload new keys if they have " +
|
||||
@@ -127,7 +122,6 @@ var devModeFlags = []cli.Flag{}
|
||||
var ValidatorFlags = append(deprecatedFlags, []cli.Flag{
|
||||
writeWalletPasswordOnWebOnboarding,
|
||||
enableExternalSlasherProtectionFlag,
|
||||
disableAttestingHistoryDBCache,
|
||||
PraterTestnet,
|
||||
RopstenTestnet,
|
||||
SepoliaTestnet,
|
||||
@@ -144,7 +138,7 @@ var E2EValidatorFlags = []string{
|
||||
}
|
||||
|
||||
// BeaconChainFlags contains a list of all the feature flags that apply to the beacon-chain client.
|
||||
var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
|
||||
var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []cli.Flag{
|
||||
devModeFlag,
|
||||
writeSSZStateTransitionsFlag,
|
||||
disableGRPCConnectionLogging,
|
||||
@@ -162,7 +156,7 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
|
||||
disableForkChoiceDoublyLinkedTree,
|
||||
disableGossipBatchAggregation,
|
||||
EnableOnlyBlindedBeaconBlocks,
|
||||
}...)
|
||||
}...)...)
|
||||
|
||||
// E2EBeaconChainFlags contains a list of the beacon chain feature flags to be tested in E2E.
|
||||
var E2EBeaconChainFlags = []string{
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
var placeholderFields = []string{"UPDATE_TIMEOUT", "INTERVALS_PER_SLOT"}
|
||||
var placeholderFields = []string{"UPDATE_TIMEOUT", "INTERVALS_PER_SLOT", "EIP4844_FORK_EPOCH", "EIP4844_FORK_VERSION"}
|
||||
|
||||
func TestLoadConfigFile(t *testing.T) {
|
||||
// See https://media.githubusercontent.com/media/ethereum/consensus-spec-tests/master/tests/minimal/config/phase0.yaml
|
||||
|
||||
@@ -21,8 +21,8 @@ const (
|
||||
genesisForkEpoch = 0
|
||||
// Altair Fork Epoch for mainnet config.
|
||||
mainnetAltairForkEpoch = 74240 // Oct 27, 2021, 10:56:23am UTC
|
||||
// Placeholder for the merge epoch until it is decided
|
||||
mainnetBellatrixForkEpoch = math.MaxUint64
|
||||
// Bellatrix Fork Epoch for mainnet config.
|
||||
mainnetBellatrixForkEpoch = 144896 // Sept 6, 2022, 11:34:47am UTC
|
||||
)
|
||||
|
||||
var mainnetNetworkConfig = &NetworkConfig{
|
||||
@@ -248,7 +248,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
// Bellatrix
|
||||
TerminalBlockHashActivationEpoch: 18446744073709551615,
|
||||
TerminalBlockHash: [32]byte{},
|
||||
TerminalTotalDifficulty: "115792089237316195423570985008687907853269984665640564039457584007913129638912",
|
||||
TerminalTotalDifficulty: "58750000000000000000000", // Estimated: Sept 15, 2022
|
||||
EthBurnAddressHex: "0x0000000000000000000000000000000000000000",
|
||||
DefaultBuilderGasLimit: uint64(30000000),
|
||||
|
||||
|
||||
@@ -98,6 +98,8 @@ func MinimalSpecConfig() *BeaconChainConfig {
|
||||
minimalConfig.DepositChainID = 5 // Chain ID of eth1 goerli.
|
||||
minimalConfig.DepositNetworkID = 5 // Network ID of eth1 goerli.
|
||||
minimalConfig.DepositContractAddress = "0x1234567890123456789012345678901234567890"
|
||||
// 2**256-2**10 for fake minimal network
|
||||
minimalConfig.TerminalTotalDifficulty = "115792089237316195423570985008687907853269984665640564039457584007913129638912"
|
||||
|
||||
minimalConfig.ConfigName = MinimalName
|
||||
minimalConfig.PresetBase = "minimal"
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package validator_service_config
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
@@ -25,10 +27,41 @@ type ProposerOptionPayload struct {
|
||||
// GasLimit is a number set to help the network decide on the maximum gas in each block.
|
||||
type BuilderConfig struct {
|
||||
Enabled bool `json:"enabled" yaml:"enabled"`
|
||||
GasLimit uint64 `json:"gas_limit,omitempty" yaml:"gas_limit,omitempty"`
|
||||
GasLimit Uint64 `json:"gas_limit,omitempty" yaml:"gas_limit,omitempty"`
|
||||
Relays []string `json:"relays" yaml:"relays"`
|
||||
}
|
||||
|
||||
type Uint64 uint64
|
||||
|
||||
func (u *Uint64) UnmarshalJSON(bs []byte) error {
|
||||
str := string(bs) // Parse plain numbers directly.
|
||||
if bs[0] == '"' && bs[len(bs)-1] == '"' {
|
||||
// Unwrap the quotes from string numbers.
|
||||
str = string(bs[1 : len(bs)-1])
|
||||
}
|
||||
x, err := strconv.ParseUint(str, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*u = Uint64(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Uint64) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var str string
|
||||
err := unmarshal(&str)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
x, err := strconv.ParseUint(str, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*u = Uint64(x)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProposerSettings is a Prysm internal representation of the fee recipient config on the validator client.
|
||||
// ProposerSettingsPayload maps to ProposerSettings on import through the CLI.
|
||||
type ProposerSettings struct {
|
||||
|
||||
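As a quick illustration of the custom Uint64 type introduced in the hunk above: its UnmarshalJSON accepts the gas limit either as a bare JSON number or as a quoted string. The sketch below is not part of the diff; the builderConfig struct name and the main function are made up purely for demonstration, and only the unmarshaling behavior mirrors the hunk.

package main

import (
    "encoding/json"
    "fmt"
    "strconv"
)

// Uint64 mirrors the type from the hunk above: it parses both quoted and
// unquoted JSON numbers into a uint64.
type Uint64 uint64

func (u *Uint64) UnmarshalJSON(bs []byte) error {
    str := string(bs) // Parse plain numbers directly.
    if bs[0] == '"' && bs[len(bs)-1] == '"' {
        str = string(bs[1 : len(bs)-1]) // Unwrap quoted numbers.
    }
    x, err := strconv.ParseUint(str, 10, 64)
    if err != nil {
        return err
    }
    *u = Uint64(x)
    return nil
}

// builderConfig is a hypothetical stand-in for the real BuilderConfig struct.
type builderConfig struct {
    GasLimit Uint64 `json:"gas_limit,omitempty"`
}

func main() {
    var a, b builderConfig
    // Both forms decode to the same value.
    if err := json.Unmarshal([]byte(`{"gas_limit": "30000000"}`), &a); err != nil {
        panic(err)
    }
    if err := json.Unmarshal([]byte(`{"gas_limit": 30000000}`), &b); err != nil {
        panic(err)
    }
    fmt.Println(a.GasLimit, b.GasLimit) // 30000000 30000000
}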
@@ -365,12 +365,12 @@ func IsInSlots(a types.Slot, b []types.Slot) bool {
|
||||
}
|
||||
|
||||
// Unique returns an array with duplicates filtered based on the type given
|
||||
func Unique(a []string) []string {
|
||||
func Unique[T comparable](a []T) []T {
|
||||
if a == nil || len(a) <= 1 {
|
||||
return a
|
||||
}
|
||||
found := map[string]bool{}
|
||||
result := make([]string, len(a))
|
||||
found := map[T]bool{}
|
||||
result := make([]T, len(a))
|
||||
end := 0
|
||||
for i := 0; i < len(a); i++ {
|
||||
if !found[a[i]] {
|
||||
|
||||
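For readers parsing the interleaved old/new lines in the hunk above: it changes Unique from a []string-only helper to a generic Unique[T comparable]. The loop body is cut off by the diff context, so the tail of the sketch below is an assumption about how such a de-duplication is typically completed, not the repository's exact code.

package main

import "fmt"

// Unique returns a slice with duplicates filtered, preserving first-seen order.
// Sketch only; details may differ from the actual implementation.
func Unique[T comparable](a []T) []T {
    if a == nil || len(a) <= 1 {
        return a
    }
    found := map[T]bool{}
    result := make([]T, len(a))
    end := 0
    for i := 0; i < len(a); i++ {
        if !found[a[i]] {
            found[a[i]] = true
            result[end] = a[i]
            end++
        }
    }
    return result[:end]
}

func main() {
    fmt.Println(Unique([]string{"a", "b", "a"})) // [a b]
    fmt.Println(Unique([]uint64{1, 2, 1}))       // [1 2]
}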
@@ -591,7 +591,11 @@ func TestIsInSlots(t *testing.T) {
|
||||
|
||||
func TestUnique(t *testing.T) {
|
||||
t.Run("string", func(t *testing.T) {
|
||||
result := slice.Unique([]string{"a", "b", "a"})
|
||||
result := slice.Unique[string]([]string{"a", "b", "a"})
|
||||
require.DeepEqual(t, []string{"a", "b"}, result)
|
||||
})
|
||||
t.Run("uint64", func(t *testing.T) {
|
||||
result := slice.Unique[uint64]([]uint64{1, 2, 1})
|
||||
require.DeepEqual(t, []uint64{1, 2}, result)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -6,6 +6,15 @@ import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// IPAddr gets the external ipv4 address and converts into a libp2p formatted value.
|
||||
func IPAddr() net.IP {
|
||||
ip, err := ExternalIP()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return net.ParseIP(ip)
|
||||
}
|
||||
|
||||
// ExternalIPv4 returns the first IPv4 available.
|
||||
func ExternalIPv4() (string, error) {
|
||||
ips, err := ipAddrs()
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.