Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: singleSlic...backfill-v

10 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | c7cd813761 |  |
|  | 9b13454457 |  |
|  | b9917807d8 |  |
|  | e5c9387cd9 |  |
|  | 2c3b3b802a |  |
|  | 3ef3e1d13b |  |
|  | 5c00fcb84f |  |
|  | aef22bf54e |  |
|  | f6764fe62b |  |
|  | 07db0dc448 |  |

@@ -43,4 +43,12 @@ build --flaky_test_attempts=5

# Better caching
build:nostamp --nostamp
build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh

# Build metadata
build --build_metadata=ROLE=CI
build --build_metadata=REPO_URL=https://github.com/prysmaticlabs/prysm.git
build --workspace_status_command=./hack/workspace_status_ci.sh

# Buildbuddy
build --bes_results_url=https://app.buildbuddy.io/invocation/
build --bes_backend=grpcs://remote.buildbuddy.io

14 .github/workflows/go.yml (vendored)
@@ -26,14 +26,14 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.19
- name: Set up Go 1.20
uses: actions/setup-go@v3
with:
go-version: 1.19
go-version: '1.20'
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@v2.12.0
go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...

lint:
@@ -43,16 +43,16 @@ jobs:
- name: Checkout
uses: actions/checkout@v2

- name: Set up Go 1.19
- name: Set up Go 1.20
uses: actions/setup-go@v3
with:
go-version: 1.19
go-version: '1.20'
id: go

- name: Golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.50.1
version: v1.52.2
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

build:
@@ -62,7 +62,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20'
id: go

- name: Check out code into the Go module directory

@@ -346,6 +346,74 @@ func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
}, nil
}

// FromProto converts a proto execution payload type to our builder
// compatible payload type.
func FromProto(payload *v1.ExecutionPayload) (ExecutionPayload, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayload{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = payload.Transactions[i]
}
return ExecutionPayload{
ParentHash: payload.ParentHash,
FeeRecipient: payload.FeeRecipient,
StateRoot: payload.StateRoot,
ReceiptsRoot: payload.ReceiptsRoot,
LogsBloom: payload.LogsBloom,
PrevRandao: payload.PrevRandao,
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: payload.ExtraData,
BaseFeePerGas: bFee,
BlockHash: payload.BlockHash,
Transactions: txs,
}, nil
}

// FromProtoCapella converts a proto execution payload type for capella to our
// builder compatible payload type.
func FromProtoCapella(payload *v1.ExecutionPayloadCapella) (ExecutionPayloadCapella, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayloadCapella{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = payload.Transactions[i]
}
withdrawals := make([]Withdrawal, len(payload.Withdrawals))
for i, w := range payload.Withdrawals {
withdrawals[i] = Withdrawal{
Index: Uint256{Int: big.NewInt(0).SetUint64(w.Index)},
ValidatorIndex: Uint256{Int: big.NewInt(0).SetUint64(uint64(w.ValidatorIndex))},
Address: w.Address,
Amount: Uint256{Int: big.NewInt(0).SetUint64(w.Amount)},
}
}
return ExecutionPayloadCapella{
ParentHash: payload.ParentHash,
FeeRecipient: payload.FeeRecipient,
StateRoot: payload.StateRoot,
ReceiptsRoot: payload.ReceiptsRoot,
LogsBloom: payload.LogsBloom,
PrevRandao: payload.PrevRandao,
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: payload.ExtraData,
BaseFeePerGas: bFee,
BlockHash: payload.BlockHash,
Transactions: txs,
Withdrawals: withdrawals,
}, nil
}

type ExecHeaderResponseCapella struct {
Data struct {
Signature hexutil.Bytes `json:"signature"`

@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -380,6 +381,14 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}

// IsViableForkCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}

// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {

@@ -25,8 +25,11 @@ var (
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
// ErrNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
// ErrNotCheckpoint is returned when a given checkpoint is not a
// checkpoint in any chain known to forkchoice
ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
)

// An invalid block is the block that fails state transition based on the core protocol rules.

@@ -53,7 +53,7 @@ func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot prim

// HeadSyncCommitteeIndices returns the sync committee index position using the head state. Input `slot` is taken in consideration
// where validator's duty for `slot - 1` is used for block inclusion in `slot`. That means when a validator is at epoch boundary
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the valiator will be considered using next period sync committee.
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the validator will be considered using next period sync committee.
//
// Spec definition:
// Being assigned to a sync committee for a given slot means that the validator produces and broadcasts signatures for slot - 1 for inclusion in slot.

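To make the boundary rule in that comment concrete, here is a minimal illustrative sketch (not Prysm code, and not part of this diff): the constant names mirror the spec and the mainnet values are assumed for illustration. It shows that a signature produced on the last slot of a sync-committee period counts toward the next period's committee, because it is included in `slot + 1`.

```go
package main

import "fmt"

// Spec-style constants; mainnet values are assumed here for illustration only.
const (
	slotsPerEpoch                = 32
	epochsPerSyncCommitteePeriod = 256
)

// syncCommitteePeriodForSigningSlot returns the sync-committee period whose committee
// a validator signing at `slot` acts for: the signature produced at `slot` is included
// in `slot+1`, so on the last slot of a period the validator already uses the next
// period's committee.
func syncCommitteePeriodForSigningSlot(slot uint64) uint64 {
	inclusionSlot := slot + 1
	epoch := inclusionSlot / slotsPerEpoch
	return epoch / epochsPerSyncCommitteePeriod
}

func main() {
	lastSlotOfPeriod0 := uint64(slotsPerEpoch*epochsPerSyncCommitteePeriod - 1)
	fmt.Println(syncCommitteePeriodForSigningSlot(lastSlotOfPeriod0))     // 1: next period's committee
	fmt.Println(syncCommitteePeriodForSigningSlot(lastSlotOfPeriod0 - 1)) // 0: still the current period
}
```
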
@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -18,7 +19,7 @@ import (
)

// getAttPreState retrieves the att pre state by either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.BeaconState, error) {
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
@@ -32,7 +33,45 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
if cachedState != nil && !cachedState.IsNil() {
return cachedState, nil
}
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return s.HeadStateReadOnly(ctx)
}
}
}

// Try the next slot cache for the early epoch calls, this should mostly have been covered already
// but is cheap
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
return nil, errors.Wrap(err, "could not compute epoch start")
}
cachedState = transition.NextSlotState(c.Root, slot)
if cachedState != nil && !cachedState.IsNil() {
if cachedState.Slot() == slot {
return cachedState, nil
}
cachedState, err = transition.ProcessSlots(ctx, cachedState, slot)
if err != nil {
return nil, errors.Wrap(err, "could not process slots")
}
return cachedState, nil
}

// Do not process attestations for old non viable checkpoints otherwise
ok, err := s.cfg.ForkChoiceStore.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: [32]byte(c.Root), Epoch: c.Epoch})
if err != nil {
return nil, errors.Wrap(err, "could not check checkpoint condition in forkchoice")
}
if !ok {
return nil, ErrNotCheckpoint
}

// Fallback to state regeneration.
baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)

@@ -27,11 +27,20 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
blkWithoutState := util.NewBeaconBlock()
blkWithoutState.Block.Slot = 0
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)
BlkWithOutStateRoot, err := blkWithoutState.Block.HashTreeRoot()

cp := &ethpb.Checkpoint{}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
r, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{Root: r[:]}
st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -42,7 +51,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))

blkWithValidState := util.NewBeaconBlock()
blkWithValidState.Block.Slot = 2
blkWithValidState.Block.Slot = 32
util.SaveBlock(t, ctx, beaconDB, blkWithValidState)

blkWithValidStateRoot, err := blkWithValidState.Block.HashTreeRoot()
@@ -57,6 +66,10 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))

service.head = &head{
state: st,
}

tests := []struct {
name string
a *ethpb.Attestation
@@ -67,11 +80,6 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "no pre state for attestations's target block",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
@@ -160,6 +168,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))

st, root, err := prepareForkchoiceState(ctx, 1, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s1, err := service.getAttPreState(ctx, cp1)
require.NoError(t, err)
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
@@ -167,8 +178,17 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))

s2, err := service.getAttPreState(ctx, cp2)
require.ErrorIs(t, ErrNotCheckpoint, err)

st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))

s2, err = service.getAttPreState(ctx, cp2)
require.NoError(t, err)

assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")

s1, err = service.getAttPreState(ctx, cp1)
@@ -187,6 +207,10 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
st, root, err = prepareForkchoiceState(ctx, 31, [32]byte(cp3.Root), [32]byte(cp2.Root), [32]byte{'P'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))

s3, err := service.getAttPreState(ctx, cp3)
require.NoError(t, err)
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
@@ -195,11 +219,18 @@ func TestStore_SaveCheckpointState(t *testing.T) {
func TestStore_UpdateCheckpointState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
baseState, _ := util.DeterministicGenesisState(t, 1)

epoch := primitives.Epoch(1)
baseState, _ := util.DeterministicGenesisState(t, 1)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), fieldparams.RootLength)}
blk := util.NewBeaconBlock()
r1, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
returned, err := service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
@@ -209,8 +240,16 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")

epoch = 2
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), fieldparams.RootLength)}
blk = util.NewBeaconBlock()
blk.Block.Slot = 64
r2, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r2[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
returned, err = service.getAttPreState(ctx, newCheckpoint)
require.NoError(t, err)
s, err := slots.EpochStart(newCheckpoint.Epoch)

@@ -1875,9 +1875,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))

require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committe as st
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committe as st
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
}

@@ -26,7 +26,7 @@ const reorgLateBlockCountAttestations = 2 * time.Second
// AttestationStateFetcher allows for retrieving a beacon state corresponding to the block
// root of an attestation's target checkpoint.
type AttestationStateFetcher interface {
AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error)
AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error)
}

// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
@@ -37,7 +37,7 @@ type AttestationReceiver interface {
}

// AttestationTargetState returns the pre state of attestation.
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error) {
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
ss, err := slots.EpochStart(target.Epoch)
if err != nil {
return nil, err
@@ -45,6 +45,9 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
return nil, err
}
// We acquire the lock here instead than on gettAttPreState because that function gets called from UpdateHead that holds a write lock
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.getAttPreState(ctx, target)
}

@@ -320,7 +320,7 @@ func (_ *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestatio
}

// AttestationTargetState mocks AttestationTargetState method in chain service.
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.BeaconState, error) {
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
return s.State, nil
}

@@ -804,7 +804,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 4))

// Mimick finalized deposit trie fetch.
// Mimic finalized deposit trie fetch.
fd := dc.FinalizedDeposits(context.Background())
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(14))
insertIndex := fd.MerkleTrieIndex + 1

@@ -141,7 +141,7 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// # FillFwd sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index

@@ -183,11 +183,11 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
currentSlot,
)
if attTime.Before(lowerBounds) {
attReceivedTooEarlyCount.Inc()
attReceivedTooLateCount.Inc()
return attError
}
if attTime.After(upperBounds) {
attReceivedTooLateCount.Inc()
attReceivedTooEarlyCount.Inc()
return attError
}
return nil

@@ -150,7 +150,7 @@ func TestSlashValidator_OK(t *testing.T) {

maxBalance := params.BeaconConfig().MaxEffectiveBalance
slashedBalance := state.Slashings()[state.Slot().Mod(uint64(params.BeaconConfig().EpochsPerSlashingsVector))]
assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isnt the expected amount")
assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isn't the expected amount")

whistleblowerReward := slashedBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
bal, err := state.BalanceAtIndex(proposer)

@@ -16,6 +16,7 @@ go_library(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//monitoring/backup:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
],

@@ -14,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/monitoring/backup"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

@@ -56,7 +57,7 @@ type ReadOnlyDatabase interface {
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
}

// NoHeadAccessDatabase defines a struct without access to chain head data.
@@ -107,7 +108,7 @@ type HeadAccessDatabase interface {

// initialization method needed for origin checkpoint sync
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
}

// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"archived_point.go",
"backfill.go",
"backup.go",
"blocks.go",
"checkpoint.go",
@@ -48,6 +49,7 @@ go_library(
"//io/file:go_default_library",
"//monitoring/progress:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
@@ -73,6 +75,7 @@ go_test(
name = "go_default_test",
srcs = [
"archived_point_test.go",
"backfill_test.go",
"backup_test.go",
"blocks_test.go",
"checkpoint_test.go",
@@ -107,6 +110,7 @@ go_test(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/testing:go_default_library",
"//testing/assert:go_default_library",

39 beacon-chain/db/kv/backfill.go (new file)
@@ -0,0 +1,39 @@
package kv

import (
"context"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)

func (s *Store) SaveBackfillStatus(ctx context.Context, bf *dbval.BackfillStatus) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillStatus")
defer span.End()
bfb, err := proto.Marshal(bf)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillStatusKey, bfb)
})
}

func (s *Store) BackfillStatus(ctx context.Context) (*dbval.BackfillStatus, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillStatus")
defer span.End()
bf := &dbval.BackfillStatus{}
err := s.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
bs := bucket.Get(backfillStatusKey)
if len(bs) == 0 {
return errors.Wrap(ErrNotFound, "BackfillStatus not found")
}
return proto.Unmarshal(bs, bf)
})
return bf, err
}

37 beacon-chain/db/kv/backfill_test.go (new file)
@@ -0,0 +1,37 @@
package kv

import (
"context"
"testing"

"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"google.golang.org/protobuf/proto"
)

func TestBackfillRoundtrip(t *testing.T) {
db := setupDB(t)
b := &dbval.BackfillStatus{}
b.HighSlot = 23
b.LowSlot = 13
b.HighRoot = bytesutil.PadTo([]byte("high"), 42)
b.LowRoot = bytesutil.PadTo([]byte("low"), 24)
m, err := proto.Marshal(b)
require.NoError(t, err)
ub := &dbval.BackfillStatus{}
require.NoError(t, proto.Unmarshal(m, ub))
require.Equal(t, b.HighSlot, ub.HighSlot)
require.DeepEqual(t, b.HighRoot, ub.HighRoot)
require.Equal(t, b.LowSlot, ub.LowSlot)
require.DeepEqual(t, b.LowRoot, ub.LowRoot)

ctx := context.Background()
require.NoError(t, db.SaveBackfillStatus(ctx, b))
dbub, err := db.BackfillStatus(ctx)

require.Equal(t, b.HighSlot, dbub.HighSlot)
require.DeepEqual(t, b.HighRoot, dbub.HighRoot)
require.Equal(t, b.LowSlot, dbub.LowSlot)
require.DeepEqual(t, b.LowRoot, dbub.LowRoot)
}

@@ -70,25 +70,6 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
return root, err
}

// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
defer span.End()

var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
rootSlice := bkt.Get(backfillBlockRootKey)
if len(rootSlice) == 0 {
return ErrNotFoundBackfillBlockRoot
}
root = bytesutil.ToBytes32(rootSlice)
return nil
})

return root, err
}

// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
func (s *Store) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
@@ -417,17 +398,6 @@ func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32
})
}

// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
// the node was initialized via checkpoint sync.
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillBlockRootKey, blockRoot[:])
})
}

// HighestRootsBelowSlot returns roots from the database slot index from the highest slot below the input slot.
// The slot value at the beginning of the return list is the slot where the roots were found. This is helpful so that
// calling code can make decisions based on the slot without resolving the blocks to discover their slot (for instance

@@ -92,23 +92,6 @@ var blockTests = []struct {
},
}

func TestStore_SaveBackfillBlockRoot(t *testing.T) {
db := setupDB(t)
ctx := context.Background()

_, err := db.BackfillBlockRoot(ctx)
require.ErrorIs(t, err, ErrNotFoundBackfillBlockRoot)

var expected [32]byte
copy(expected[:], []byte{0x23})
err = db.SaveBackfillBlockRoot(ctx, expected)
require.NoError(t, err)
actual, err := db.BackfillBlockRoot(ctx)
require.NoError(t, err)
require.Equal(t, expected, actual)

}

func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
BlockCacheSize = 1
slot := primitives.Slot(20)
@@ -543,7 +526,7 @@ func TestStore_Blocks_Retrieve_SlotRangeWithStep(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, 150, len(retrieved))
for _, b := range retrieved {
assert.Equal(t, primitives.Slot(0), (b.Block().Slot()-100)%step, "Unexpect block slot %d", b.Block().Slot())
assert.Equal(t, primitives.Slot(0), (b.Block().Slot()-100)%step, "Unexpected block slot %d", b.Block().Slot())
}
})
}

@@ -57,8 +57,8 @@ var (
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
backfillBlockRootKey = []byte("backfill-block-root")
// tracking data about an ongoing backfill
backfillStatusKey = []byte("backfill-status")

// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
lastArchivedIndexKey = []byte("last-archived")

@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -24,11 +25,6 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
}
return errors.Wrap(err, "genesis block root query error: checkpoint sync must verify genesis to proceed")
}
err = s.SaveBackfillBlockRoot(ctx, genesisRoot)
if err != nil {
return errors.Wrap(err, "unable to save genesis root as initial backfill starting point for checkpoint sync")
}

cf, err := detect.FromState(serState)
if err != nil {
return errors.Wrap(err, "could not sniff config+fork for origin state bytes")
@@ -50,11 +46,24 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
}
blk := wblk.Block()

// save block
blockRoot, err := blk.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute HashTreeRoot of checkpoint block")
}

bf := &dbval.BackfillStatus{
HighSlot: uint64(wblk.Block().Slot()),
HighRoot: blockRoot[:],
LowSlot: 0,
LowRoot: genesisRoot[:],
OriginRoot: blockRoot[:],
OriginSlot: uint64(wblk.Block().Slot()),
}

if err = s.SaveBackfillStatus(ctx, bf); err != nil {
return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync.")
}

log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")

@@ -14,7 +14,6 @@ go_library(
"metrics.go",
"options.go",
"prometheus.go",
"provider.go",
"rpc_connection.go",
"service.go",
],
@@ -90,7 +89,6 @@ go_test(
"init_test.go",
"log_processing_test.go",
"prometheus_test.go",
"provider_test.go",
"service_test.go",
],
data = glob(["testdata/**"]),
@@ -122,7 +120,6 @@ go_test(
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/clientstats:go_default_library",
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",

@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
)

@@ -15,7 +16,7 @@ type Option func(s *Service) error
// WithHttpEndpoint parse http endpoint for the powchain service to use.
func WithHttpEndpoint(endpointString string) Option {
return func(s *Service) error {
s.cfg.currHttpEndpoint = HttpEndpoint(endpointString)
s.cfg.currHttpEndpoint = network.HttpEndpoint(endpointString)
return nil
}
}
@@ -27,7 +28,7 @@ func WithHttpEndpointAndJWTSecret(endpointString string, secret []byte) Option {
return nil
}
// Overwrite authorization type for all endpoints to be of a bearer type.
hEndpoint := HttpEndpoint(endpointString)
hEndpoint := network.HttpEndpoint(endpointString)
hEndpoint.Auth.Method = authorization.Bearer
hEndpoint.Auth.Value = string(secret)

@@ -15,7 +15,7 @@ import (
func TestCleanup(t *testing.T) {
ctx := context.Background()
pc, err := NewPowchainCollector(ctx)
assert.NoError(t, err, "Uxpected error caling NewPowchainCollector")
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")
unregistered := pc.unregister()
assert.Equal(t, true, unregistered, "PowchainCollector.unregister did not return true (via prometheus.DefaultRegistry)")
// PowchainCollector is a prometheus.Collector, so we should be able to register it again
@@ -39,7 +39,7 @@ func TestCleanup(t *testing.T) {
func TestCancelation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
pc, err := NewPowchainCollector(ctx)
assert.NoError(t, err, "Uxpected error caling NewPowchainCollector")
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")
ticker := time.NewTicker(10 * time.Second)
cancel()
select {

@@ -1,49 +0,0 @@
package execution

import (
"encoding/base64"
"strings"

"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
)

// HttpEndpoint extracts an httputils.Endpoint from the provider parameter.
func HttpEndpoint(eth1Provider string) network.Endpoint {
endpoint := network.Endpoint{
Url: "",
Auth: network.AuthorizationData{
Method: authorization.None,
Value: "",
}}

authValues := strings.Split(eth1Provider, ",")
endpoint.Url = strings.TrimSpace(authValues[0])
if len(authValues) > 2 {
log.Errorf(
"ETH1 endpoint string can contain one comma for specifying the authorization header to access the provider."+
" String contains too many commas: %d. Skipping authorization.", len(authValues)-1)
} else if len(authValues) == 2 {
switch network.Method(strings.TrimSpace(authValues[1])) {
case authorization.Basic:
basicAuthValues := strings.Split(strings.TrimSpace(authValues[1]), " ")
if len(basicAuthValues) != 2 {
log.Errorf("Basic Authentication has incorrect format. Skipping authorization.")
} else {
endpoint.Auth.Method = authorization.Basic
endpoint.Auth.Value = base64.StdEncoding.EncodeToString([]byte(basicAuthValues[1]))
}
case authorization.Bearer:
bearerAuthValues := strings.Split(strings.TrimSpace(authValues[1]), " ")
if len(bearerAuthValues) != 2 {
log.Errorf("Bearer Authentication has incorrect format. Skipping authorization.")
} else {
endpoint.Auth.Method = authorization.Bearer
endpoint.Auth.Value = bearerAuthValues[1]
}
case authorization.None:
log.Errorf("Authorization has incorrect format or authorization type is not supported.")
}
}
return endpoint
}

@@ -1,74 +0,0 @@
package execution

import (
"testing"

"github.com/prysmaticlabs/prysm/v4/network/authorization"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestHttpEndpoint(t *testing.T) {
hook := logTest.NewGlobal()
url := "http://test"

t.Run("URL", func(t *testing.T) {
endpoint := HttpEndpoint(url)
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
})
t.Run("URL with separator", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
})
t.Run("URL with whitespace", func(t *testing.T) {
endpoint := HttpEndpoint(" " + url + " ,")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
})
t.Run("Basic auth", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",Basic username:password")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Basic, endpoint.Auth.Method)
assert.Equal(t, "dXNlcm5hbWU6cGFzc3dvcmQ=", endpoint.Auth.Value)
})
t.Run("Basic auth with whitespace", func(t *testing.T) {
endpoint := HttpEndpoint(url + ", Basic username:password ")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Basic, endpoint.Auth.Method)
assert.Equal(t, "dXNlcm5hbWU6cGFzc3dvcmQ=", endpoint.Auth.Value)
})
t.Run("Basic auth with incorrect format", func(t *testing.T) {
hook.Reset()
endpoint := HttpEndpoint(url + ",Basic username:password foo")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
assert.LogsContain(t, hook, "Skipping authorization")
})
t.Run("Bearer auth", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",Bearer token")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Bearer, endpoint.Auth.Method)
assert.Equal(t, "token", endpoint.Auth.Value)
})
t.Run("Bearer auth with whitespace", func(t *testing.T) {
endpoint := HttpEndpoint(url + ", Bearer token ")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Bearer, endpoint.Auth.Method)
assert.Equal(t, "token", endpoint.Auth.Value)
})
t.Run("Bearer auth with incorrect format", func(t *testing.T) {
hook.Reset()
endpoint := HttpEndpoint(url + ",Bearer token foo")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
assert.LogsContain(t, hook, "Skipping authorization")
})
t.Run("Too many separators", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",Bearer token,foo")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
assert.LogsContain(t, hook, "Skipping authorization")
})
}

@@ -3,7 +3,6 @@ package execution
import (
"context"
"fmt"
"net/url"
"strings"
"time"

@@ -107,26 +106,10 @@ func (s *Service) retryExecutionClientConnection(ctx context.Context, err error)

// Initializes an RPC connection with authentication headers.
func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.Endpoint) (*gethRPC.Client, error) {
// Need to handle ipc and http
var client *gethRPC.Client
u, err := url.Parse(endpoint.Url)
client, err := network.NewExecutionRPCClient(ctx, endpoint)
if err != nil {
return nil, err
}
switch u.Scheme {
case "http", "https":
client, err = gethRPC.DialOptions(ctx, endpoint.Url, gethRPC.WithHTTPClient(endpoint.HttpClient()))
if err != nil {
return nil, err
}
case "", "ipc":
client, err = gethRPC.DialIPC(ctx, endpoint.Url)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
}
if endpoint.Auth.Method != authorization.None {
header, err := endpoint.Auth.ToHeaderValue()
if err != nil {

@@ -228,6 +228,39 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
return n.root, nil
}

// IsViableForCheckpoint returns whether the root passed is a checkpoint root for any
// known chain in forkchoice.
func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
node, ok := f.store.nodeByRoot[cp.Root]
if !ok || node == nil {
return false, nil
}
epochStart, err := slots.EpochStart(cp.Epoch)
if err != nil {
return false, err
}
if node.slot > epochStart {
return false, nil
}

if len(node.children) == 0 {
return true, nil
}
if node.slot == epochStart {
return true, nil
}
nodeEpoch := slots.ToEpoch(node.slot)
if nodeEpoch >= cp.Epoch {
return false, nil
}
for _, child := range node.children {
if child.slot > epochStart {
return true, nil
}
}
return false, nil
}

// updateBalances updates the balances that directly voted for each block taking into account the
// validators' latest votes.
func (f *ForkChoice) updateBalances() error {
@@ -594,3 +627,12 @@ func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte)
f.store.committeeWeight /= uint64(params.BeaconConfig().SlotsPerEpoch)
return nil
}

// Slot returns the slot of the given root if it's known to forkchoice
func (f *ForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
}
return n.slot, nil
}

@@ -754,3 +754,110 @@ func TestForkChoice_UnrealizedJustifiedPayloadBlockHash(t *testing.T) {
got := f.UnrealizedJustifiedPayloadBlockHash()
require.Equal(t, [32]byte{'A'}, got)
}

func TestForkChoiceIsViableForCheckpoint(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()

st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
// No Node
viable, err := f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
require.NoError(t, err)
require.Equal(t, false, viable)

// No Children
require.NoError(t, f.InsertNode(ctx, st, root))
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 0})
require.NoError(t, err)
require.Equal(t, true, viable)

viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)

viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 2})
require.NoError(t, err)
require.Equal(t, true, viable)

st, broot, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, root, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, broot))

// Epoch start
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
require.NoError(t, err)
require.Equal(t, true, viable)

viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)

// No Children but impossible checkpoint
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
require.NoError(t, err)
require.Equal(t, false, viable)

viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)

st, croot, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, broot, [32]byte{'C'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, croot))

// Children in same epoch
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
require.NoError(t, err)
require.Equal(t, false, viable)

viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)

st, droot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, broot, [32]byte{'D'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, droot))

// Children in next epoch but boundary
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
require.NoError(t, err)
require.Equal(t, false, viable)

viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)

// Boundary block
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: droot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)

viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: droot, Epoch: 0})
require.NoError(t, err)
require.Equal(t, false, viable)

// Children in next epoch
st, eroot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, broot, [32]byte{'E'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, eroot))

viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
}

func TestForkChoiceSlot(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, root, err := prepareForkchoiceState(ctx, 3, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
// No Node
_, err = f.Slot(root)
require.ErrorIs(t, ErrNilNode, err)

require.NoError(t, f.InsertNode(ctx, st, root))
slot, err := f.Slot(root)
require.NoError(t, err)
require.Equal(t, primitives.Slot(3), slot)
}

@@ -53,6 +53,7 @@ type Getter interface {
CommonAncestor(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, primitives.Slot, error)
IsCanonical(root [32]byte) bool
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)
FinalizedPayloadBlockHash() [32]byte
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
@@ -66,6 +67,7 @@ type Getter interface {
Tips() ([][32]byte, []primitives.Slot)
IsOptimistic(root [32]byte) (bool, error)
ShouldOverrideFCU() bool
Slot([32]byte) (primitives.Slot, error)
}

// Setter allows to set forkchoice information

@@ -209,6 +209,18 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
if err := bfs.Reload(ctx); err != nil {
return nil, errors.Wrap(err, "backfill status initialization error")
}
bf, err := backfill.NewService(ctx,
backfill.WithGenesisWaiter(beacon.genesisWaiter),
backfill.WithStatusUpdater(bfs),
backfill.WithBackfillDB(beacon.db),
backfill.WithP2P(beacon.fetchP2P()),
)
if err != nil {
return nil, errors.Wrap(err, "backfill service initialization error")
}
if err := beacon.services.RegisterService(bf); err != nil {
return nil, errors.Wrap(err, "error registering backfill service")
}

log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
@@ -496,7 +508,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
return nil
}

func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status, fc forkchoice.ForkChoicer) error {
func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.StatusUpdater, fc forkchoice.ForkChoicer) error {
opts := []stategen.StateGenOption{stategen.WithBackfillStatus(bfs)}
sg := stategen.New(b.db, fc, opts...)

@@ -202,7 +202,7 @@ func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testi
if string(c) == "%" {
next := string(topic[i+1])
if next != "d" && next != "x" {
t.Errorf("Topic %s has formatting incompatiable with scanfcheck. Only %%d and %%x are supported", topic)
t.Errorf("Topic %s has formatting incompatible with scanfcheck. Only %%d and %%x are supported", topic)
}
}
}

@@ -49,6 +49,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,

topic += s.Encoding().ProtocolSuffix()
iterator := s.dv5Listener.RandomNodes()
defer iterator.Close()
switch {
case strings.Contains(topic, GossipAttestationMessage):
iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))

@@ -22,7 +22,7 @@ func (ds *Server) GetPeer(_ context.Context, peerReq *ethpb.PeerRequest) (*ethpb
return ds.getPeer(pid)
}

// ListPeers returns all peers known to the host node, irregardless of if they are connected/
// ListPeers returns all peers known to the host node, regardless of if they are connected/
// disconnected.
func (ds *Server) ListPeers(_ context.Context, _ *empty.Empty) (*ethpb.DebugPeerResponses, error) {
var responses []*ethpb.DebugPeerResponse

@@ -127,7 +127,6 @@ func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBea
}
}
}

executionData, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
if err != nil {
return errors.Wrap(err, "failed to get execution payload")

@@ -409,7 +409,7 @@ func (m *MaxSpanChunksSlice) Update(
// a min span chunk for use in chunk updates. To compute this value, we look at the difference between
// H = historyLength and the current epoch. Then, we check if the source epoch > difference. If so,
// then the start epoch is source epoch - 1. Otherwise, we return to the caller a boolean signifying
// the input argumets are invalid for the chunk and the start epoch does not exist.
// the input arguments are invalid for the chunk and the start epoch does not exist.
func (m *MinSpanChunksSlice) StartEpoch(
sourceEpoch, currentEpoch primitives.Epoch,
) (epoch primitives.Epoch, exists bool) {

@@ -348,13 +348,13 @@ func TestBeaconState_HashTreeRoot(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
root, err := testState.HashTreeRoot(context.Background())
|
||||
if err == nil && tt.error != "" {
|
||||
t.Errorf("Expected error, expected %v, recevied %v", tt.error, err)
|
||||
t.Errorf("Expected error, expected %v, received %v", tt.error, err)
|
||||
}
|
||||
pbState, err := statenative.ProtobufBeaconStatePhase0(testState.ToProtoUnsafe())
|
||||
require.NoError(t, err)
|
||||
genericHTR, err := pbState.HashTreeRoot()
|
||||
if err == nil && tt.error != "" {
|
||||
t.Errorf("Expected error, expected %v, recevied %v", tt.error, err)
|
||||
t.Errorf("Expected error, expected %v, received %v", tt.error, err)
|
||||
}
|
||||
assert.DeepNotEqual(t, []byte{}, root[:], "Received empty hash tree root")
|
||||
assert.DeepEqual(t, genericHTR[:], root[:], "Expected hash tree root to match generic")
|
||||
@@ -435,13 +435,13 @@ func TestBeaconState_HashTreeRoot_FieldTrie(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
root, err := testState.HashTreeRoot(context.Background())
|
||||
if err == nil && tt.error != "" {
|
||||
t.Errorf("Expected error, expected %v, recevied %v", tt.error, err)
|
||||
t.Errorf("Expected error, expected %v, received %v", tt.error, err)
|
||||
}
|
||||
pbState, err := statenative.ProtobufBeaconStatePhase0(testState.ToProtoUnsafe())
|
||||
require.NoError(t, err)
|
||||
genericHTR, err := pbState.HashTreeRoot()
|
||||
if err == nil && tt.error != "" {
|
||||
t.Errorf("Expected error, expected %v, recevied %v", tt.error, err)
|
||||
t.Errorf("Expected error, expected %v, received %v", tt.error, err)
|
||||
}
|
||||
assert.DeepNotEqual(t, []byte{}, root[:], "Received empty hash tree root")
|
||||
assert.DeepEqual(t, genericHTR[:], root[:], "Expected hash tree root to match generic")
|
||||
|
||||
@@ -50,7 +50,7 @@ type State struct {
|
||||
finalizedInfo *finalizedInfo
|
||||
epochBoundaryStateCache *epochBoundaryState
|
||||
saveHotStateDB *saveHotStateDbConfig
|
||||
backfillStatus *backfill.Status
|
||||
backfillStatus *backfill.StatusUpdater
|
||||
migrationLock *sync.Mutex
|
||||
fc forkchoice.ForkChoicer
|
||||
}
|
||||
@@ -77,7 +77,7 @@ type finalizedInfo struct {
|
||||
// StateGenOption is a functional option for controlling the initialization of a *State value
|
||||
type StateGenOption func(*State)
|
||||
|
||||
func WithBackfillStatus(bfs *backfill.Status) StateGenOption {
|
||||
func WithBackfillStatus(bfs *backfill.StatusUpdater) StateGenOption {
|
||||
return func(sg *State) {
|
||||
sg.backfillStatus = bfs
|
||||
}
|
||||
|
||||
@@ -237,7 +237,7 @@ func recomputeRootFromLayerVariable(idx int, item [32]byte, layers [][]*[32]byte
|
||||
return root, layers, nil
|
||||
}
|
||||
|
||||
// AddInMixin describes a method from which a lenth mixin is added to the
|
||||
// AddInMixin describes a method from which a length mixin is added to the
|
||||
// provided root.
|
||||
func AddInMixin(root [32]byte, length uint64) ([32]byte, error) {
|
||||
rootBuf := new(bytes.Buffer)
|
||||
|
||||
@@ -2,15 +2,27 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["status.go"],
|
||||
srcs = [
|
||||
"batch.go",
|
||||
"batcher.go",
|
||||
"service.go",
|
||||
"status.go",
|
||||
"worker.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/dbval:go_default_library",
|
||||
"//runtime:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -19,11 +31,13 @@ go_test(
|
||||
srcs = ["status_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/blocks/testing:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/dbval:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
|
||||
34
beacon-chain/sync/backfill/batch.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package backfill
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type batchId string
|
||||
|
||||
type batch struct {
|
||||
scheduled time.Time
|
||||
retries int
|
||||
begin primitives.Slot
|
||||
end primitives.Slot // half-open interval, [begin, end), ie >= start, < end.
|
||||
results []blocks.ROBlock
|
||||
err error
|
||||
succeeded bool
|
||||
}
|
||||
|
||||
func (b batch) logFields() log.Fields {
|
||||
return map[string]interface{}{
|
||||
"batch_id": b.id(),
|
||||
"scheduled": b.scheduled.String(),
|
||||
"retries": b.retries,
|
||||
}
|
||||
}
|
||||
|
||||
func (b batch) id() batchId {
|
||||
return batchId(fmt.Sprintf("%d:%d", b.begin, b.end))
|
||||
}
|
||||
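An aside on the batch type above (illustration only, not part of the diff): the interval is half-open, so a batch spanning slots 96 through 127 carries the id "96:128". A minimal sketch assuming it sits in the same backfill package; exampleBatchID is a hypothetical helper added here for illustration.

package backfill

import "fmt"

// exampleBatchID shows the id format produced by batch.id for a half-open
// slot range: begin is included, end is not.
func exampleBatchID() {
	b := batch{begin: 96, end: 128} // covers slots 96..127
	fmt.Println(b.id())             // prints "96:128"
}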
109
beacon-chain/sync/backfill/batcher.go
Normal file
@@ -0,0 +1,109 @@
|
||||
package backfill
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
)
|
||||
|
||||
var ErrRetryLimitExceeded = errors.New("Unable to retrieve backfill batch")
|
||||
|
||||
type batcher struct {
|
||||
nWorkers int
|
||||
size primitives.Slot
|
||||
su *StatusUpdater
|
||||
todo chan batch
|
||||
done chan batch
|
||||
errc chan error
|
||||
// outstanding is keyed by the id of the batch that is relied on
|
||||
// ie if batch id 2 relies on batch id 1, and 1 is head
|
||||
outstanding map[batchId]*batch
|
||||
nextId batchId
|
||||
lastId batchId
|
||||
}
|
||||
|
||||
func (br *batcher) run(ctx context.Context) {
|
||||
status := br.su.Status()
|
||||
// Set min at bottom of backfill range. Add 1 because range is inclusive.
|
||||
min := primitives.Slot(status.LowSlot) + 1
|
||||
initial := br.next(min, primitives.Slot(status.HighSlot))
|
||||
br.nextId, br.lastId = initial.id(), initial.id()
|
||||
br.outstanding[initial.id()] = &initial
|
||||
br.todo <- initial
|
||||
for {
|
||||
for i := 0; i < br.nWorkers-len(br.outstanding); i++ {
|
||||
last := br.outstanding[br.lastId]
|
||||
newLast := br.next(min, last.begin)
|
||||
br.outstanding[newLast.id()] = &newLast
|
||||
br.lastId = newLast.id()
|
||||
br.todo <- newLast
|
||||
}
|
||||
select {
|
||||
case b := <-br.done:
|
||||
if err := br.completeBatch(b); err != nil {
|
||||
br.errc <- err
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (br *batcher) completeBatch(b batch) error {
|
||||
// if the batch failed, send it back to the work queue.
|
||||
// we have no limit on the number of retries, because all batches are necessary.
|
||||
if b.err != nil {
|
||||
b.err = nil
|
||||
br.outstanding[b.id()] = &b
|
||||
br.todo <- b
|
||||
return nil
|
||||
}
|
||||
|
||||
br.outstanding[b.id()] = &b
|
||||
if err := br.includeCompleted(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (br *batcher) includeCompleted() error {
|
||||
for len(br.outstanding) > 0 {
|
||||
b := br.outstanding[br.nextId]
|
||||
if !b.succeeded {
|
||||
return nil
|
||||
}
|
||||
if err := br.updateDB(*b); err != nil {
|
||||
return err
|
||||
}
|
||||
status := br.su.Status()
|
||||
min := primitives.Slot(status.LowSlot)
|
||||
promote := br.outstanding[br.next(min, b.begin).id()]
|
||||
br.nextId = promote.id()
|
||||
delete(br.outstanding, b.id())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (br *batcher) updateDB(b batch) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (br *batcher) next(min, upper primitives.Slot) batch {
|
||||
n := batch{begin: min}
|
||||
n.end = upper // Batches don't overlap because end is exclusive, begin is inclusive.
|
||||
if upper > br.size+min {
|
||||
n.begin = upper - br.size
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func newBatcher(size primitives.Slot, su *StatusUpdater, todo, done chan batch) *batcher {
|
||||
return &batcher{
|
||||
size: size,
|
||||
su: su,
|
||||
todo: todo,
|
||||
done: done,
|
||||
}
|
||||
}
|
||||
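To make the slicing arithmetic above concrete (illustration only, not part of the diff): next walks down from the upper bound in fixed-size, non-overlapping half-open ranges and clamps the final range at the low end. A sketch assuming the same backfill package; exampleBatchSchedule is a hypothetical helper.

package backfill

import "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"

// exampleBatchSchedule walks the backfill range top-down the way the batcher
// does: with low=0, high=256 and size=64 it yields 192:256, 128:192, 64:128
// and finally 0:64, with no overlap because end is exclusive.
func exampleBatchSchedule() []batch {
	br := &batcher{size: 64}
	low, upper := primitives.Slot(0), primitives.Slot(256)
	var out []batch
	for upper > low {
		b := br.next(low, upper)
		out = append(out, b)
		upper = b.begin
	}
	return out
}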
140
beacon-chain/sync/backfill/service.go
Normal file
@@ -0,0 +1,140 @@
|
||||
package backfill
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const defaultWorkerCount = 1
|
||||
|
||||
type Service struct {
|
||||
ctx context.Context
|
||||
genesisWaiter startup.GenesisWaiter
|
||||
genesis *startup.Genesis
|
||||
clock startup.Clock
|
||||
su *StatusUpdater
|
||||
db BackfillDB
|
||||
p2p p2p.P2P
|
||||
nWorkers int
|
||||
todo chan batch
|
||||
done chan batch
|
||||
errChan chan error
|
||||
workers map[workerId]*p2pWorker
|
||||
batcher *batcher
|
||||
batchSize uint64
|
||||
}
|
||||
|
||||
var _ runtime.Service = (*Service)(nil)
|
||||
|
||||
type ServiceOption func(*Service) error
|
||||
|
||||
func WithStatusUpdater(su *StatusUpdater) ServiceOption {
|
||||
return func(s *Service) error {
|
||||
s.su = su
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithGenesisWaiter(gw startup.GenesisWaiter) ServiceOption {
|
||||
return func(s *Service) error {
|
||||
s.genesisWaiter = gw
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithBackfillDB(db BackfillDB) ServiceOption {
|
||||
return func(s *Service) error {
|
||||
s.db = db
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithP2P(p p2p.P2P) ServiceOption {
|
||||
return func(s *Service) error {
|
||||
s.p2p = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithWorkerCount(n int) ServiceOption {
|
||||
return func(s *Service) error {
|
||||
s.nWorkers = n
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func NewService(ctx context.Context, opts ...ServiceOption) (*Service, error) {
|
||||
s := &Service{
|
||||
ctx: ctx,
|
||||
}
|
||||
for _, o := range opts {
|
||||
if err := o(s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if s.nWorkers == 0 {
|
||||
s.nWorkers = defaultWorkerCount
|
||||
}
|
||||
if s.todo == nil {
|
||||
s.todo = make(chan batch)
|
||||
}
|
||||
if s.done == nil {
|
||||
s.done = make(chan batch)
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *Service) Start() {
|
||||
genesis, err := s.genesisWaiter.WaitForGenesis(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("backfill service failed to start while waiting for genesis data")
|
||||
}
|
||||
s.clock = genesis.Clock()
|
||||
if err := s.spawnBatcher(); err != nil {
|
||||
log.WithError(err).Fatal("error starting backfill service")
|
||||
}
|
||||
s.spawnWorkers()
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case err := <-s.errChan:
|
||||
if err := s.tryRecover(err); err != nil {
|
||||
log.WithError(err).Fatal("Non-recoverable error in backfill service, quitting.")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) tryRecover(err error) error {
|
||||
log.WithError(err).Error("error from the batcher")
|
||||
// If error is not recoverable, reply with an error, which will shut down the service.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) Stop() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) Status() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) spawnWorkers() {
|
||||
for i := 0; i < s.nWorkers; i++ {
|
||||
id := workerId(i)
|
||||
s.workers[id] = newP2pWorker(id, s.p2p, s.todo, s.done)
|
||||
go s.workers[id].run(s.ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) spawnBatcher() error {
|
||||
s.batcher = newBatcher(primitives.Slot(s.batchSize), s.su, s.todo, s.done)
|
||||
go s.batcher.run(s.ctx)
|
||||
return nil
|
||||
}
|
||||
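For orientation (illustration only, not part of the diff): the options above are the same ones the node wiring near the top of this diff passes in. A minimal construction sketch assuming the same backfill package; exampleNewService is a hypothetical caller.

package backfill

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
)

// exampleNewService injects every collaborator through a ServiceOption; the
// worker count falls back to defaultWorkerCount because WithWorkerCount is
// omitted here.
func exampleNewService(ctx context.Context, gw startup.GenesisWaiter, su *StatusUpdater, db BackfillDB, host p2p.P2P) (*Service, error) {
	return NewService(ctx,
		WithGenesisWaiter(gw),
		WithStatusUpdater(su),
		WithBackfillDB(db),
		WithP2P(host),
	)
}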
@@ -2,121 +2,152 @@ package backfill
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// NewStatus correctly initializes a Status value with the required database value.
|
||||
func NewStatus(store BackfillDB) *Status {
|
||||
return &Status{
|
||||
// NewStatus correctly initializes a StatusUpdater value with the required database value.
|
||||
func NewStatus(store BackfillDB) *StatusUpdater {
|
||||
return &StatusUpdater{
|
||||
store: store,
|
||||
}
|
||||
}
|
||||
|
||||
// Status provides a way to update and query the status of a backfill process that may be necessary to track when
|
||||
// StatusUpdater provides a way to update and query the status of a backfill process that may be necessary to track when
|
||||
// a node was initialized via checkpoint sync. With checkpoint sync, there will be a gap in node history from genesis
|
||||
// until the checkpoint sync origin block. Status provides the means to update the value keeping track of the lower
|
||||
// end of the missing block range via the Advance() method, to check whether a Slot is missing from the database
|
||||
// until the checkpoint sync origin block. StatusUpdater provides the means to update the value keeping track of the lower
|
||||
// end of the missing block range via the FillFwd() method, to check whether a Slot is missing from the database
|
||||
// via the SlotCovered() method, and to see the current StartGap() and EndGap().
|
||||
type Status struct {
|
||||
start primitives.Slot
|
||||
end primitives.Slot
|
||||
type StatusUpdater struct {
|
||||
sync.RWMutex
|
||||
store BackfillDB
|
||||
genesisSync bool
|
||||
status *dbval.BackfillStatus
|
||||
}
|
||||
|
||||
// SlotCovered uses StartGap() and EndGap() to determine if the given slot is covered by the current chain history.
|
||||
// If the slot is <= StartGap(), or >= EndGap(), the result is true.
|
||||
// If the slot is between StartGap() and EndGap(), the result is false.
|
||||
func (s *Status) SlotCovered(sl primitives.Slot) bool {
|
||||
// SlotCovered determines if the given slot is covered by the current chain history.
|
||||
// If the slot is <= backfill low slot, or >= backfill high slot, the result is true.
|
||||
// If the slot is between the backfill low and high slots, the result is false.
|
||||
func (s *StatusUpdater) SlotCovered(sl primitives.Slot) bool {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
// short circuit if the node was synced from genesis
|
||||
if s.genesisSync {
|
||||
return true
|
||||
}
|
||||
if s.StartGap() < sl && sl < s.EndGap() {
|
||||
if s.status.LowSlot < uint64(sl) && uint64(sl) < s.status.HighSlot {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// StartGap returns the slot at the beginning of the range that needs to be backfilled.
|
||||
func (s *Status) StartGap() primitives.Slot {
|
||||
return s.start
|
||||
}
|
||||
var ErrFillFwdPastUpper = errors.New("cannot move backfill StatusUpdater above upper bound of backfill")
|
||||
var ErrFillBackPastLower = errors.New("cannot move backfill StatusUpdater below lower bound of backfill")
|
||||
|
||||
// EndGap returns the slot at the end of the range that needs to be backfilled.
|
||||
func (s *Status) EndGap() primitives.Slot {
|
||||
return s.end
|
||||
}
|
||||
|
||||
var ErrAdvancePastOrigin = errors.New("cannot advance backfill Status beyond the origin checkpoint slot")
|
||||
|
||||
// Advance advances the backfill position to the given slot & root.
|
||||
// It updates the backfill block root entry in the database,
|
||||
// and also updates the Status value's copy of the backfill position slot.
|
||||
func (s *Status) Advance(ctx context.Context, upTo primitives.Slot, root [32]byte) error {
|
||||
if upTo > s.end {
|
||||
return errors.Wrapf(ErrAdvancePastOrigin, "advance slot=%d, origin slot=%d", upTo, s.end)
|
||||
// FillFwd moves the lower bound of the backfill status to the given slot & root,
|
||||
// saving the new state to the database and then updating StatusUpdater's in-memory copy with the saved value.
|
||||
func (s *StatusUpdater) FillFwd(ctx context.Context, newLow primitives.Slot, root [32]byte) error {
|
||||
status := s.Status()
|
||||
unl := uint64(newLow)
|
||||
if unl > status.HighSlot {
|
||||
return errors.Wrapf(ErrFillFwdPastUpper, "advance slot=%d, origin slot=%d", unl, status.HighSlot)
|
||||
}
|
||||
s.start = upTo
|
||||
return s.store.SaveBackfillBlockRoot(ctx, root)
|
||||
status.LowSlot = unl
|
||||
status.LowRoot = root[:]
|
||||
return s.updateStatus(ctx, status)
|
||||
}
|
||||
|
||||
// Reload queries the database for backfill status, initializing the internal data and validating the database state.
|
||||
func (s *Status) Reload(ctx context.Context) error {
|
||||
cpRoot, err := s.store.OriginCheckpointBlockRoot(ctx)
|
||||
// FillBack moves the upper bound of the backfill status to the given slot & root,
|
||||
// saving the new state to the database and then updating StatusUpdater's in-memory copy with the saved value.
|
||||
func (s *StatusUpdater) FillBack(ctx context.Context, newHigh primitives.Slot, root [32]byte) error {
|
||||
status := s.Status()
|
||||
unh := uint64(newHigh)
|
||||
if unh < status.LowSlot {
|
||||
return errors.Wrapf(ErrFillBackPastLower, "advance slot=%d, origin slot=%d", unh, status.LowSlot)
|
||||
}
|
||||
status.HighSlot = unh
|
||||
status.HighRoot = root[:]
|
||||
return s.updateStatus(ctx, status)
|
||||
}
|
||||
|
||||
// recover will check to see if the db is from a legacy checkpoint sync and either build a new BackfillStatus
|
||||
// or label the node as synced from genesis.
|
||||
func (s *StatusUpdater) recoverLegacy(ctx context.Context) error {
|
||||
cpr, err := s.store.OriginCheckpointBlockRoot(ctx)
|
||||
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
|
||||
s.genesisSync = true
|
||||
return nil
|
||||
}
|
||||
|
||||
cpb, err := s.store.Block(ctx, cpr)
|
||||
if err != nil {
|
||||
// mark genesis sync and short circuit further lookups
|
||||
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
|
||||
s.genesisSync = true
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
return errors.Wrapf(err, "error retrieving block for origin checkpoint root=%#x", cpr)
|
||||
}
|
||||
cpBlock, err := s.store.Block(ctx, cpRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error retrieving block for origin checkpoint root=%#x", cpRoot)
|
||||
if err := blocks.BeaconBlockIsNil(cpb); err != nil {
|
||||
return errors.Wrapf(err, "nil block found for origin checkpoint root=%#x", cpr)
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(cpBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
s.end = cpBlock.Block().Slot()
|
||||
|
||||
_, err = s.store.GenesisBlockRoot(ctx)
|
||||
gbr, err := s.store.GenesisBlockRoot(ctx)
|
||||
if err != nil {
|
||||
if errors.Is(err, db.ErrNotFoundGenesisBlockRoot) {
|
||||
return errors.Wrap(err, "genesis block root required for checkpoint sync")
|
||||
}
|
||||
return err
|
||||
}
|
||||
os := uint64(cpb.Block().Slot())
|
||||
bs := &dbval.BackfillStatus{
|
||||
HighSlot: os,
|
||||
HighRoot: cpr[:],
|
||||
LowSlot: 0,
|
||||
LowRoot: gbr[:],
|
||||
OriginSlot: os,
|
||||
OriginRoot: cpr[:],
|
||||
}
|
||||
return s.updateStatus(ctx, bs)
|
||||
}
|
||||
|
||||
bfRoot, err := s.store.BackfillBlockRoot(ctx)
|
||||
// Reload queries the database for backfill status, initializing the internal data and validating the database state.
|
||||
func (s *StatusUpdater) Reload(ctx context.Context) error {
|
||||
status, err := s.store.BackfillStatus(ctx)
|
||||
if err != nil {
|
||||
if errors.Is(err, db.ErrNotFoundBackfillBlockRoot) {
|
||||
return errors.Wrap(err, "found origin checkpoint block root, but no backfill block root")
|
||||
if errors.Is(err, db.ErrNotFound) {
|
||||
return s.recoverLegacy(ctx)
|
||||
}
|
||||
}
|
||||
return s.updateStatus(ctx, status)
|
||||
}
|
||||
|
||||
func (s *StatusUpdater) updateStatus(ctx context.Context, bs *dbval.BackfillStatus) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if proto.Equal(s.status, bs) {
|
||||
return nil
|
||||
}
|
||||
if err := s.store.SaveBackfillStatus(ctx, bs); err != nil {
|
||||
return err
|
||||
}
|
||||
bfBlock, err := s.store.Block(ctx, bfRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error retrieving block for backfill root=%#x", bfRoot)
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(bfBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
s.start = bfBlock.Block().Slot()
|
||||
|
||||
s.status = bs
|
||||
return nil
|
||||
}
|
||||
|
||||
// BackfillDB describes the set of DB methods that the Status type needs to function.
|
||||
type BackfillDB interface {
|
||||
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
GenesisBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
func (s *StatusUpdater) Status() *dbval.BackfillStatus {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return proto.Clone(s.status).(*dbval.BackfillStatus)
|
||||
}
|
||||
|
||||
// BackfillDB describes the set of DB methods that the StatusUpdater type needs to function.
|
||||
type BackfillDB interface {
|
||||
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
|
||||
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
|
||||
OriginCheckpointBlockRoot(context.Context) ([32]byte, error)
|
||||
Block(context.Context, [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
GenesisBlockRoot(context.Context) ([32]byte, error)
|
||||
}
|
||||
|
||||
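To make the status semantics concrete (illustration only, not part of the diff): the missing range is the open interval between LowSlot and HighSlot, FillFwd raises the low bound, FillBack lowers the high bound, and SlotCovered reports whether a slot falls outside what is still missing. One plausible way the calls compose, assuming the same backfill package and batches imported from the top of the gap downward; exampleRecordBatch and lowestRoot are hypothetical.

package backfill

import "context"

// exampleRecordBatch shrinks the gap from the top after a batch's blocks have
// been persisted: the batch's begin slot becomes the new high bound, so
// SlotCovered now reports everything at or above it as present.
func exampleRecordBatch(ctx context.Context, su *StatusUpdater, b batch, lowestRoot [32]byte) (bool, error) {
	if err := su.FillBack(ctx, b.begin, lowestRoot); err != nil {
		return false, err
	}
	return su.SlotCovered(b.begin), nil
}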
@@ -4,9 +4,11 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
blocktest "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
@@ -21,17 +23,28 @@ type mockBackfillDB struct {
|
||||
saveBackfillBlockRoot func(ctx context.Context, blockRoot [32]byte) error
|
||||
genesisBlockRoot func(ctx context.Context) ([32]byte, error)
|
||||
originCheckpointBlockRoot func(ctx context.Context) ([32]byte, error)
|
||||
backfillBlockRoot func(ctx context.Context) ([32]byte, error)
|
||||
block func(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
saveBackfillStatus func(ctx context.Context, status *dbval.BackfillStatus) error
|
||||
backfillStatus func(context.Context) (*dbval.BackfillStatus, error)
|
||||
status *dbval.BackfillStatus
|
||||
err error
|
||||
}
|
||||
|
||||
var _ BackfillDB = &mockBackfillDB{}
|
||||
|
||||
func (db *mockBackfillDB) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
if db.saveBackfillBlockRoot != nil {
|
||||
return db.saveBackfillBlockRoot(ctx, blockRoot)
|
||||
func (db *mockBackfillDB) SaveBackfillStatus(ctx context.Context, status *dbval.BackfillStatus) error {
|
||||
if db.saveBackfillStatus != nil {
|
||||
return db.saveBackfillStatus(ctx, status)
|
||||
}
|
||||
return errEmptyMockDBMethod
|
||||
db.status = status
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *mockBackfillDB) BackfillStatus(ctx context.Context) (*dbval.BackfillStatus, error) {
|
||||
if db.backfillStatus != nil {
|
||||
return db.backfillStatus(ctx)
|
||||
}
|
||||
return db.status, nil
|
||||
}
|
||||
|
||||
func (db *mockBackfillDB) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {
|
||||
@@ -48,13 +61,6 @@ func (db *mockBackfillDB) OriginCheckpointBlockRoot(ctx context.Context) ([32]by
|
||||
return [32]byte{}, errEmptyMockDBMethod
|
||||
}
|
||||
|
||||
func (db *mockBackfillDB) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
|
||||
if db.backfillBlockRoot != nil {
|
||||
return db.backfillBlockRoot(ctx)
|
||||
}
|
||||
return [32]byte{}, errEmptyMockDBMethod
|
||||
}
|
||||
|
||||
func (db *mockBackfillDB) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
if db.block != nil {
|
||||
return db.block(ctx, blockRoot)
|
||||
@@ -66,42 +72,42 @@ func TestSlotCovered(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
slot primitives.Slot
|
||||
status *Status
|
||||
status *StatusUpdater
|
||||
result bool
|
||||
}{
|
||||
{
|
||||
name: "below start true",
|
||||
status: &Status{start: 1},
|
||||
status: &StatusUpdater{status: &dbval.BackfillStatus{LowSlot: 1}},
|
||||
slot: 0,
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
name: "above end true",
|
||||
status: &Status{end: 1},
|
||||
status: &StatusUpdater{status: &dbval.BackfillStatus{HighSlot: 1}},
|
||||
slot: 2,
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
name: "equal end true",
|
||||
status: &Status{end: 1},
|
||||
status: &StatusUpdater{status: &dbval.BackfillStatus{HighSlot: 1}},
|
||||
slot: 1,
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
name: "equal start true",
|
||||
status: &Status{start: 2},
|
||||
status: &StatusUpdater{status: &dbval.BackfillStatus{LowSlot: 2}},
|
||||
slot: 2,
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
name: "between false",
|
||||
status: &Status{start: 1, end: 3},
|
||||
status: &StatusUpdater{status: &dbval.BackfillStatus{LowSlot: 1, HighSlot: 3}},
|
||||
slot: 2,
|
||||
result: false,
|
||||
},
|
||||
{
|
||||
name: "genesisSync always true",
|
||||
status: &Status{genesisSync: true},
|
||||
status: &StatusUpdater{genesisSync: true},
|
||||
slot: 100,
|
||||
result: true,
|
||||
},
|
||||
@@ -121,17 +127,17 @@ func TestAdvance(t *testing.T) {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
s := &Status{end: 100, store: mdb}
|
||||
s := &StatusUpdater{status: &dbval.BackfillStatus{HighSlot: 100}, store: mdb}
|
||||
var root [32]byte
|
||||
copy(root[:], []byte{0x23, 0x23})
|
||||
require.NoError(t, s.Advance(ctx, 90, root))
|
||||
require.NoError(t, s.FillFwd(ctx, 90, root))
|
||||
require.Equal(t, root, saveBackfillBuf[0])
|
||||
not := s.SlotCovered(95)
|
||||
require.Equal(t, false, not)
|
||||
|
||||
// this should still be len 1 after failing to advance
|
||||
require.Equal(t, 1, len(saveBackfillBuf))
|
||||
require.ErrorIs(t, s.Advance(ctx, s.end+1, root), ErrAdvancePastOrigin)
|
||||
require.ErrorIs(t, s.FillFwd(ctx, primitives.Slot(s.status.HighSlot)+1, root), ErrFillFwdPastUpper)
|
||||
// this has an element in it from the previous test, there shouldn't be an additional one
|
||||
require.Equal(t, 1, len(saveBackfillBuf))
|
||||
}
|
||||
@@ -171,7 +177,7 @@ func TestReload(t *testing.T) {
|
||||
name string
|
||||
db BackfillDB
|
||||
err error
|
||||
expected *Status
|
||||
expected *StatusUpdater
|
||||
}{
|
||||
/*{
|
||||
name: "origin not found, implying genesis sync ",
|
||||
@@ -180,7 +186,7 @@ func TestReload(t *testing.T) {
|
||||
originCheckpointBlockRoot: func(ctx context.Context) ([32]byte, error) {
|
||||
return [32]byte{}, db.ErrNotFoundOriginBlockRoot
|
||||
}},
|
||||
expected: &Status{genesisSync: true},
|
||||
expected: &StatusUpdater{genesisSync: true},
|
||||
},
|
||||
{
|
||||
name: "genesis not found error",
|
||||
@@ -318,7 +324,7 @@ func TestReload(t *testing.T) {
|
||||
err: derp,
|
||||
},*/
|
||||
{
|
||||
name: "complete happy path",
|
||||
name: "legacy recovery",
|
||||
db: &mockBackfillDB{
|
||||
genesisBlockRoot: goodBlockRoot(params.BeaconConfig().ZeroHash),
|
||||
originCheckpointBlockRoot: goodBlockRoot(originRoot),
|
||||
@@ -331,15 +337,15 @@ func TestReload(t *testing.T) {
|
||||
}
|
||||
return nil, errors.New("not derp")
|
||||
},
|
||||
backfillBlockRoot: goodBlockRoot(backfillRoot),
|
||||
backfillStatus: func(context.Context) (*dbval.BackfillStatus, error) { return nil, db.ErrNotFound },
|
||||
},
|
||||
err: derp,
|
||||
expected: &Status{genesisSync: false, start: backfillSlot, end: originSlot},
|
||||
expected: &StatusUpdater{genesisSync: false, status: &dbval.BackfillStatus{LowSlot: 0, HighSlot: uint64(originSlot)}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
s := &Status{
|
||||
s := &StatusUpdater{
|
||||
store: c.db,
|
||||
}
|
||||
err := s.Reload(ctx)
|
||||
@@ -352,7 +358,7 @@ func TestReload(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
require.Equal(t, c.expected.genesisSync, s.genesisSync)
|
||||
require.Equal(t, c.expected.start, s.start)
|
||||
require.Equal(t, c.expected.end, s.end)
|
||||
require.Equal(t, c.expected.status.LowSlot, s.status.LowSlot)
|
||||
require.Equal(t, c.expected.status.HighSlot, s.status.HighSlot)
|
||||
}
|
||||
}
|
||||
|
||||
44
beacon-chain/sync/backfill/worker.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package backfill
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type workerId int
|
||||
|
||||
type p2pWorker struct {
|
||||
id workerId
|
||||
p2p p2p.P2P
|
||||
todo chan batch
|
||||
done chan batch
|
||||
}
|
||||
|
||||
func (w *p2pWorker) run(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case b := <-w.todo:
|
||||
log.WithFields(b.logFields()).Debug("Backfill worker received batch.")
|
||||
w.done <- w.handle(b)
|
||||
case <-ctx.Done():
|
||||
log.WithField("worker_id", w.id).Info("Backfill worker exiting after context canceled.")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *p2pWorker) handle(b batch) batch {
|
||||
// if the batch is not successfully fetched and validated, increment the attempts counter
|
||||
return b
|
||||
}
|
||||
|
||||
func newP2pWorker(id workerId, p p2p.P2P, todo, done chan batch) *p2pWorker {
|
||||
return &p2pWorker{
|
||||
id: id,
|
||||
p2p: p,
|
||||
todo: todo,
|
||||
done: done,
|
||||
}
|
||||
}
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -152,20 +151,6 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.Signe
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
attSlot := signed.Message.Aggregate.Data.Slot
|
||||
// Only advance state if different epoch as the committee can only change on an epoch transition.
|
||||
if slots.ToEpoch(attSlot) > slots.ToEpoch(bs.Slot()) {
|
||||
startSlot, err := slots.EpochStart(slots.ToEpoch(attSlot))
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
bs, err = transition.ProcessSlots(ctx, bs, startSlot)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
}
|
||||
|
||||
// Verify validator index is within the beacon committee.
|
||||
if err := validateIndexInCommittee(ctx, bs, signed.Message.Aggregate, signed.Message.AggregatorIndex); err != nil {
|
||||
wrappedErr := errors.Wrapf(err, "Could not validate index in committee")
|
||||
|
||||
@@ -329,7 +329,7 @@ func (s *Service) setSyncContributionBits(c *ethpb.SyncCommitteeContribution) er
|
||||
}
|
||||
bitsList, ok := v.([][]byte)
|
||||
if !ok {
|
||||
return errors.New("could not covert cached value to []bitfield.Bitvector")
|
||||
return errors.New("could not convert cached value to []bitfield.Bitvector")
|
||||
}
|
||||
has, err := bitListOverlaps(bitsList, c.AggregationBits)
|
||||
if err != nil {
|
||||
@@ -354,7 +354,7 @@ func (s *Service) hasSeenSyncContributionBits(c *ethpb.SyncCommitteeContribution
|
||||
}
|
||||
bitsList, ok := v.([][]byte)
|
||||
if !ok {
|
||||
return false, errors.New("could not covert cached value to []bitfield.Bitvector128")
|
||||
return false, errors.New("could not convert cached value to []bitfield.Bitvector128")
|
||||
}
|
||||
return bitListOverlaps(bitsList, c.AggregationBits.Bytes())
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
"factory.go",
|
||||
"getters.go",
|
||||
"proto.go",
|
||||
"roblock.go",
|
||||
"setters.go",
|
||||
"types.go",
|
||||
],
|
||||
|
||||
63
consensus-types/blocks/roblock.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package blocks
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
)
|
||||
|
||||
var ErrRootLength error = errors.New("incorrect length for hash_tree_root")
|
||||
|
||||
type ROBlock struct {
|
||||
interfaces.ReadOnlySignedBeaconBlock
|
||||
root [32]byte
|
||||
}
|
||||
|
||||
func (b ROBlock) Root() [32]byte {
|
||||
return b.root
|
||||
}
|
||||
|
||||
func NewROBlock(b interfaces.ReadOnlySignedBeaconBlock, root [32]byte) ROBlock {
|
||||
return ROBlock{ReadOnlySignedBeaconBlock: b, root: root}
|
||||
}
|
||||
|
||||
// ROBlockSlice implements sort.Interface so that slices of ROBlocks can be easily sorted
|
||||
type ROBlockSlice []ROBlock
|
||||
|
||||
var _ sort.Interface = ROBlockSlice{}
|
||||
|
||||
// Less reports whether the element with index i must sort before the element with index j.
|
||||
func (s ROBlockSlice) Less(i, j int) bool {
|
||||
si, sj := s[i].Block().Slot(), s[j].Block().Slot()
|
||||
|
||||
// lower slot wins
|
||||
if si != sj {
|
||||
return s[i].Block().Slot() < s[j].Block().Slot()
|
||||
}
|
||||
|
||||
// break slot tie lexicographically comparing roots byte for byte
|
||||
ri, rj := s[i].Root(), s[j].Root()
|
||||
k := 0
|
||||
for ; k < fieldparams.RootLength; k++ {
|
||||
// advance the byte offset until you hit the end
|
||||
if ri[k] == rj[k] {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if k == fieldparams.RootLength {
|
||||
return false
|
||||
}
|
||||
return ri[k] < rj[k]
|
||||
}
|
||||
|
||||
// Swap swaps the elements with indexes i and j.
|
||||
func (s ROBlockSlice) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
// Len is the number of elements in the collection.
|
||||
func (s ROBlockSlice) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
@@ -72,7 +72,7 @@ func (m MetadataV0) MetadataObjV0() *pb.MetaDataV0 {
|
||||
return m.md
|
||||
}
|
||||
|
||||
// MetadataObjV1 returns the inner metatdata object in its type
|
||||
// MetadataObjV1 returns the inner metadata object in its type
|
||||
// specified form. If it doesn't exist then we return nothing.
|
||||
func (_ MetadataV0) MetadataObjV1() *pb.MetaDataV1 {
|
||||
return nil
|
||||
@@ -147,7 +147,7 @@ func (_ MetadataV1) MetadataObjV0() *pb.MetaDataV0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MetadataObjV1 returns the inner metatdata object in its type
|
||||
// MetadataObjV1 returns the inner metadata object in its type
|
||||
// specified form. If it doesn't exist then we return nothing.
|
||||
func (m MetadataV1) MetadataObjV1() *pb.MetaDataV1 {
|
||||
return m.md
|
||||
|
||||
@@ -14,7 +14,7 @@ var _ heap.Interface = &queue{}
|
||||
|
||||
// some tests rely on the ordering of items from this method
|
||||
func testCases() (tc []*Item) {
|
||||
// create a slice of items with priority / times offest by these seconds
|
||||
// create a slice of items with priority / times offset by these seconds
|
||||
for i, m := range []time.Duration{
|
||||
5,
|
||||
183600, // 51 hours
|
||||
|
||||
@@ -78,7 +78,7 @@ func TestValidatorRegister_OK(t *testing.T) {
|
||||
merkleTreeIndex[i] = binary.LittleEndian.Uint64(idx)
|
||||
}
|
||||
|
||||
assert.Equal(t, uint64(0), merkleTreeIndex[0], "Deposit event total desposit count miss matched")
|
||||
assert.Equal(t, uint64(1), merkleTreeIndex[1], "Deposit event total desposit count miss matched")
|
||||
assert.Equal(t, uint64(2), merkleTreeIndex[2], "Deposit event total desposit count miss matched")
|
||||
assert.Equal(t, uint64(0), merkleTreeIndex[0], "Deposit event total deposit count mismatched")
|
||||
assert.Equal(t, uint64(1), merkleTreeIndex[1], "Deposit event total deposit count mismatched")
|
||||
assert.Equal(t, uint64(2), merkleTreeIndex[2], "Deposit event total deposit count mismatched")
|
||||
}
|
||||
|
||||
@@ -131,7 +131,7 @@ func (cf *VersionedUnmarshaler) UnmarshalBeaconState(marshaled []byte) (s state.
|
||||
}
|
||||
|
||||
var beaconBlockSlot = fieldSpec{
|
||||
// ssz variable length offset (not to be confused with the fieldSpec offest) is a uint32
|
||||
// ssz variable length offset (not to be confused with the fieldSpec offset) is a uint32
|
||||
// variable length. Offsets come before fixed length data, so that's 4 bytes at the beginning
|
||||
// then signature is 96 bytes, 4+96 = 100
|
||||
offset: 100,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Continous Integration script to check that BUILD.bazel files are as expected
|
||||
# Continuous Integration script to check that BUILD.bazel files are as expected
|
||||
# when generated from gazelle.
|
||||
|
||||
# Duplicate redirect 5 to stdout so that it can be captured, but still printed
|
||||
|
||||
@@ -155,7 +155,7 @@ cat << EOF
|
||||
|
||||
-c Move discovered coverage reports to the trash
|
||||
-z FILE Upload specified file directly to Codecov and bypass all report generation.
|
||||
This is inteded to be used only with a pre-formatted Codecov report and is not
|
||||
This is intended to be used only with a pre-formatted Codecov report and is not
|
||||
expected to work under any other circumstances.
|
||||
-Z Exit with 1 if not successful. Default will Exit with 0
|
||||
|
||||
|
||||
8
hack/latest_version_tag.sh
Executable file
@@ -0,0 +1,8 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# Prints the latest git version tag, like "v2.12.8"
|
||||
git tag -l 'v*' --sort=creatordate |
|
||||
perl -nle 'if (/^v\d+\.\d+\.\d+$/) { print $_ }' |
|
||||
tail -n1
|
||||
|
||||
@@ -70,7 +70,7 @@ if git diff-index --quiet HEAD --; then
|
||||
echo "nothing to push, exiting early"
|
||||
exit 0
|
||||
else
|
||||
echo "changes detected, commiting and pushing to ethereumapis"
|
||||
echo "changes detected, committing and pushing to ethereumapis"
|
||||
fi
|
||||
|
||||
# Push to the mirror repository
|
||||
|
||||
@@ -18,8 +18,8 @@ searchstring="prysmaticlabs/prysm/v4/"
|
||||
for ((i = 0; i < arraylength; i++)); do
|
||||
color "34" "$destination"
|
||||
destination=${file_list[i]#*$searchstring}
|
||||
chmod 755 "$destination"
|
||||
cp -R -L "${file_list[i]}" "$destination"
|
||||
chmod 755 "$destination"
|
||||
done
|
||||
|
||||
# Run goimports on newly generated protos
|
||||
|
||||
@@ -1,9 +1,20 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Note: The STABLE_ prefix will force a relink when the value changes when using rules_go x_defs.
|
||||
repo_url=$(git config --get remote.origin.url)
|
||||
echo "REPO_URL $repo_url"
|
||||
|
||||
echo STABLE_GIT_COMMIT "continuous-integration"
|
||||
echo DATE "now"
|
||||
echo DATE_UNIX "0"
|
||||
echo DOCKER_TAG "ci-foo"
|
||||
echo STABLE_GIT_TAG "c1000deadbeef"
|
||||
commit_sha=$(git rev-parse HEAD)
|
||||
echo "COMMIT_SHA $commit_sha"
|
||||
|
||||
echo "GIT_BRANCH $git_branch"
|
||||
|
||||
git_tree_status=$(git diff-index --quiet HEAD -- && echo 'Clean' || echo 'Modified')
|
||||
echo "GIT_TREE_STATUS $git_tree_status"
|
||||
|
||||
# Note: the "STABLE_" suffix causes these to be part of the "stable" workspace
|
||||
# status, which may trigger rebuilds of certain targets if these values change
|
||||
# and you're building with the "--stamp" flag.
|
||||
latest_version_tag=$(./hack/latest_version_tag.sh)
|
||||
echo "STABLE_VERSION_TAG $latest_version_tag"
|
||||
echo "STABLE_COMMIT_SHA $commit_sha"
|
||||
echo "STABLE_GIT_TAG $latest_version_tag"
|
||||
|
||||
@@ -12,7 +12,7 @@ type Scraper interface {
|
||||
|
||||
// An Updater can take the io.Reader created by Scraper and
|
||||
// send it to a data sink for consumption. An Updater is used
|
||||
// for instance ot send the scraped data for a beacon-node to
|
||||
// for instance to send the scraped data for a beacon-node to
|
||||
// a remote client-stats endpoint.
|
||||
type Updater interface {
|
||||
Update(io.Reader) error
|
||||
|
||||
@@ -12,6 +12,7 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//network/authorization:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
|
||||
"@com_github_golang_jwt_jwt_v4//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
@@ -32,5 +33,6 @@ go_test(
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_golang_jwt_jwt_v4//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,11 +1,17 @@
|
||||
package network
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
gethRPC "github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/prysmaticlabs/prysm/v4/network/authorization"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Endpoint is an endpoint with authorization data.
|
||||
@@ -53,6 +59,46 @@ func (d *AuthorizationData) ToHeaderValue() (string, error) {
|
||||
return "", errors.New("could not create HTTP header for unknown authorization method")
|
||||
}
|
||||
|
||||
// HttpEndpoint extracts an httputils.Endpoint from the provider parameter.
|
||||
func HttpEndpoint(eth1Provider string) Endpoint {
|
||||
endpoint := Endpoint{
|
||||
Url: "",
|
||||
Auth: AuthorizationData{
|
||||
Method: authorization.None,
|
||||
Value: "",
|
||||
}}
|
||||
|
||||
authValues := strings.Split(eth1Provider, ",")
|
||||
endpoint.Url = strings.TrimSpace(authValues[0])
|
||||
if len(authValues) > 2 {
|
||||
log.Errorf(
|
||||
"ETH1 endpoint string can contain one comma for specifying the authorization header to access the provider."+
|
||||
" String contains too many commas: %d. Skipping authorization.", len(authValues)-1)
|
||||
} else if len(authValues) == 2 {
|
||||
switch Method(strings.TrimSpace(authValues[1])) {
|
||||
case authorization.Basic:
|
||||
basicAuthValues := strings.Split(strings.TrimSpace(authValues[1]), " ")
|
||||
if len(basicAuthValues) != 2 {
|
||||
log.Errorf("Basic Authentication has incorrect format. Skipping authorization.")
|
||||
} else {
|
||||
endpoint.Auth.Method = authorization.Basic
|
||||
endpoint.Auth.Value = base64.StdEncoding.EncodeToString([]byte(basicAuthValues[1]))
|
||||
}
|
||||
case authorization.Bearer:
|
||||
bearerAuthValues := strings.Split(strings.TrimSpace(authValues[1]), " ")
|
||||
if len(bearerAuthValues) != 2 {
|
||||
log.Errorf("Bearer Authentication has incorrect format. Skipping authorization.")
|
||||
} else {
|
||||
endpoint.Auth.Method = authorization.Bearer
|
||||
endpoint.Auth.Value = bearerAuthValues[1]
|
||||
}
|
||||
case authorization.None:
|
||||
log.Errorf("Authorization has incorrect format or authorization type is not supported.")
|
||||
}
|
||||
}
|
||||
return endpoint
|
||||
}
|
||||
|
||||
// Method returns the authorizationmethod.AuthorizationMethod corresponding with the parameter value.
|
||||
func Method(auth string) authorization.AuthorizationMethod {
|
||||
if strings.HasPrefix(strings.ToLower(auth), "basic") {
|
||||
@@ -76,3 +122,27 @@ func NewHttpClientWithSecret(secret string) *http.Client {
|
||||
Transport: authTransport,
|
||||
}
|
||||
}
|
||||
|
||||
func NewExecutionRPCClient(ctx context.Context, endpoint Endpoint) (*gethRPC.Client, error) {
|
||||
// Need to handle ipc and http
|
||||
var client *gethRPC.Client
|
||||
u, err := url.Parse(endpoint.Url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "http", "https":
|
||||
client, err = gethRPC.DialOptions(ctx, endpoint.Url, gethRPC.WithHTTPClient(endpoint.HttpClient()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case "", "ipc":
|
||||
client, err = gethRPC.DialIPC(ctx, endpoint.Url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
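A quick usage sketch (illustration only, not part of the diff), assuming the same network package: HttpEndpoint parses the provider string, including any ",Basic ..." or ",Bearer ..." auth suffix, and NewExecutionRPCClient picks the HTTP or IPC transport from the URL scheme; dialExecution is a hypothetical caller.

package network

import "context"

// dialExecution parses a provider string into an Endpoint and dials it,
// letting NewExecutionRPCClient choose the transport from the URL scheme.
func dialExecution(ctx context.Context, provider string) error {
	endpoint := HttpEndpoint(provider)
	client, err := NewExecutionRPCClient(ctx, endpoint)
	if err != nil {
		return err
	}
	defer client.Close()
	return nil
}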
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/network/authorization"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestToHeaderValue(t *testing.T) {
|
||||
@@ -140,3 +141,68 @@ func TestAuthorizationDataEquals(t *testing.T) {
|
||||
assert.Equal(t, false, d.Equals(other))
|
||||
})
|
||||
}
|
||||
|
||||
func TestHttpEndpoint(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
url := "http://test"
|
||||
|
||||
t.Run("URL", func(t *testing.T) {
|
||||
endpoint := HttpEndpoint(url)
|
||||
assert.Equal(t, url, endpoint.Url)
|
||||
assert.Equal(t, authorization.None, endpoint.Auth.Method)
|
||||
})
|
||||
t.Run("URL with separator", func(t *testing.T) {
|
||||
endpoint := HttpEndpoint(url + ",")
|
||||
assert.Equal(t, url, endpoint.Url)
|
||||
assert.Equal(t, authorization.None, endpoint.Auth.Method)
|
||||
})
|
||||
t.Run("URL with whitespace", func(t *testing.T) {
|
||||
endpoint := HttpEndpoint(" " + url + " ,")
|
||||
assert.Equal(t, url, endpoint.Url)
|
||||
assert.Equal(t, authorization.None, endpoint.Auth.Method)
|
||||
})
|
||||
t.Run("Basic auth", func(t *testing.T) {
|
||||
endpoint := HttpEndpoint(url + ",Basic username:password")
|
||||
assert.Equal(t, url, endpoint.Url)
|
||||
assert.Equal(t, authorization.Basic, endpoint.Auth.Method)
|
||||
assert.Equal(t, "dXNlcm5hbWU6cGFzc3dvcmQ=", endpoint.Auth.Value)
|
||||
})
|
||||
t.Run("Basic auth with whitespace", func(t *testing.T) {
|
||||
endpoint := HttpEndpoint(url + ", Basic username:password ")
|
||||
assert.Equal(t, url, endpoint.Url)
|
||||
assert.Equal(t, authorization.Basic, endpoint.Auth.Method)
|
||||
assert.Equal(t, "dXNlcm5hbWU6cGFzc3dvcmQ=", endpoint.Auth.Value)
|
||||
})
|
||||
t.Run("Basic auth with incorrect format", func(t *testing.T) {
|
||||
hook.Reset()
|
||||
endpoint := HttpEndpoint(url + ",Basic username:password foo")
|
||||
assert.Equal(t, url, endpoint.Url)
|
||||
assert.Equal(t, authorization.None, endpoint.Auth.Method)
|
||||
assert.LogsContain(t, hook, "Skipping authorization")
|
||||
})
|
||||
t.Run("Bearer auth", func(t *testing.T) {
|
||||
endpoint := HttpEndpoint(url + ",Bearer token")
|
||||
assert.Equal(t, url, endpoint.Url)
|
||||
assert.Equal(t, authorization.Bearer, endpoint.Auth.Method)
|
||||
assert.Equal(t, "token", endpoint.Auth.Value)
|
||||
})
|
||||
t.Run("Bearer auth with whitespace", func(t *testing.T) {
|
||||
endpoint := HttpEndpoint(url + ", Bearer token ")
|
||||
assert.Equal(t, url, endpoint.Url)
|
||||
assert.Equal(t, authorization.Bearer, endpoint.Auth.Method)
|
||||
assert.Equal(t, "token", endpoint.Auth.Value)
|
||||
})
|
||||
t.Run("Bearer auth with incorrect format", func(t *testing.T) {
|
||||
hook.Reset()
|
||||
endpoint := HttpEndpoint(url + ",Bearer token foo")
|
||||
assert.Equal(t, url, endpoint.Url)
|
||||
assert.Equal(t, authorization.None, endpoint.Auth.Method)
|
||||
assert.LogsContain(t, hook, "Skipping authorization")
|
||||
})
|
||||
t.Run("Too many separators", func(t *testing.T) {
|
||||
endpoint := HttpEndpoint(url + ",Bearer token,foo")
|
||||
assert.Equal(t, url, endpoint.Url)
|
||||
assert.Equal(t, authorization.None, endpoint.Auth.Method)
|
||||
assert.LogsContain(t, hook, "Skipping authorization")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
// IsForkNextEpoch checks if an alloted fork is in the following epoch.
|
||||
// IsForkNextEpoch checks if an allotted fork is in the following epoch.
|
||||
func IsForkNextEpoch(genesisTime time.Time, genesisValidatorsRoot []byte) (bool, error) {
|
||||
if genesisTime.IsZero() {
|
||||
return false, errors.New("genesis time is not set")
|
||||
|
||||
23
proto/dbval/BUILD.bazel
Normal file
@@ -0,0 +1,23 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@rules_proto//proto:defs.bzl", "proto_library")
|
||||
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
|
||||
|
||||
proto_library(
|
||||
name = "dbval_proto",
|
||||
srcs = ["dbval.proto"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_proto_library(
|
||||
name = "dbval_go_proto",
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/proto/dbval",
|
||||
proto = ":dbval_proto",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
embed = [":dbval_go_proto"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/proto/dbval",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
197
proto/dbval/dbval.pb.go
generated
Executable file
@@ -0,0 +1,197 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.15.8
|
||||
// source: proto/dbval/dbval.proto
|
||||
|
||||
package dbval
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type BackfillStatus struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
HighSlot uint64 `protobuf:"varint,1,opt,name=high_slot,json=highSlot,proto3" json:"high_slot,omitempty"`
|
||||
HighRoot []byte `protobuf:"bytes,2,opt,name=high_root,json=highRoot,proto3" json:"high_root,omitempty"`
|
||||
LowSlot uint64 `protobuf:"varint,3,opt,name=low_slot,json=lowSlot,proto3" json:"low_slot,omitempty"`
|
||||
LowRoot []byte `protobuf:"bytes,4,opt,name=low_root,json=lowRoot,proto3" json:"low_root,omitempty"`
|
||||
OriginSlot uint64 `protobuf:"varint,5,opt,name=origin_slot,json=originSlot,proto3" json:"origin_slot,omitempty"`
|
||||
OriginRoot []byte `protobuf:"bytes,6,opt,name=origin_root,json=originRoot,proto3" json:"origin_root,omitempty"`
|
||||
}
|
||||
|
||||
func (x *BackfillStatus) Reset() {
|
||||
*x = BackfillStatus{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_dbval_dbval_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *BackfillStatus) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*BackfillStatus) ProtoMessage() {}
|
||||
|
||||
func (x *BackfillStatus) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_dbval_dbval_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use BackfillStatus.ProtoReflect.Descriptor instead.
|
||||
func (*BackfillStatus) Descriptor() ([]byte, []int) {
|
||||
return file_proto_dbval_dbval_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *BackfillStatus) GetHighSlot() uint64 {
|
||||
if x != nil {
|
||||
return x.HighSlot
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *BackfillStatus) GetHighRoot() []byte {
|
||||
if x != nil {
|
||||
return x.HighRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *BackfillStatus) GetLowSlot() uint64 {
|
||||
if x != nil {
|
||||
return x.LowSlot
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *BackfillStatus) GetLowRoot() []byte {
|
||||
if x != nil {
|
||||
return x.LowRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *BackfillStatus) GetOriginSlot() uint64 {
|
||||
if x != nil {
|
||||
return x.OriginSlot
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *BackfillStatus) GetOriginRoot() []byte {
|
||||
if x != nil {
|
||||
return x.OriginRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_proto_dbval_dbval_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_proto_dbval_dbval_proto_rawDesc = []byte{
|
||||
0x0a, 0x17, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x64, 0x62, 0x76, 0x61, 0x6c, 0x2f, 0x64, 0x62,
|
||||
0x76, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x65, 0x74, 0x68, 0x65, 0x72,
|
||||
0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x64, 0x62, 0x76, 0x61, 0x6c, 0x22, 0xc2, 0x01,
|
||||
0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x12, 0x1b, 0x0a, 0x09, 0x68, 0x69, 0x67, 0x68, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x04, 0x52, 0x08, 0x68, 0x69, 0x67, 0x68, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x1b, 0x0a,
|
||||
0x09, 0x68, 0x69, 0x67, 0x68, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
|
||||
0x52, 0x08, 0x68, 0x69, 0x67, 0x68, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f,
|
||||
0x77, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6c, 0x6f,
|
||||
0x77, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x6f, 0x6f,
|
||||
0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6c, 0x6f, 0x77, 0x52, 0x6f, 0x6f, 0x74,
|
||||
0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18,
|
||||
0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x6c, 0x6f,
|
||||
0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x72, 0x6f, 0x6f, 0x74,
|
||||
0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x52, 0x6f,
|
||||
0x6f, 0x74, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70,
|
||||
0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x34, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x64, 0x62,
|
||||
0x76, 0x61, 0x6c, 0x3b, 0x64, 0x62, 0x76, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
}

var (
    file_proto_dbval_dbval_proto_rawDescOnce sync.Once
    file_proto_dbval_dbval_proto_rawDescData = file_proto_dbval_dbval_proto_rawDesc
)

func file_proto_dbval_dbval_proto_rawDescGZIP() []byte {
    file_proto_dbval_dbval_proto_rawDescOnce.Do(func() {
        file_proto_dbval_dbval_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_dbval_dbval_proto_rawDescData)
    })
    return file_proto_dbval_dbval_proto_rawDescData
}

var file_proto_dbval_dbval_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_proto_dbval_dbval_proto_goTypes = []interface{}{
    (*BackfillStatus)(nil), // 0: ethereum.eth.dbval.BackfillStatus
}
var file_proto_dbval_dbval_proto_depIdxs = []int32{
    0, // [0:0] is the sub-list for method output_type
    0, // [0:0] is the sub-list for method input_type
    0, // [0:0] is the sub-list for extension type_name
    0, // [0:0] is the sub-list for extension extendee
    0, // [0:0] is the sub-list for field type_name
}

func init() { file_proto_dbval_dbval_proto_init() }
func file_proto_dbval_dbval_proto_init() {
    if File_proto_dbval_dbval_proto != nil {
        return
    }
    if !protoimpl.UnsafeEnabled {
        file_proto_dbval_dbval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*BackfillStatus); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
    }
    type x struct{}
    out := protoimpl.TypeBuilder{
        File: protoimpl.DescBuilder{
            GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
            RawDescriptor: file_proto_dbval_dbval_proto_rawDesc,
            NumEnums:      0,
            NumMessages:   1,
            NumExtensions: 0,
            NumServices:   0,
        },
        GoTypes:           file_proto_dbval_dbval_proto_goTypes,
        DependencyIndexes: file_proto_dbval_dbval_proto_depIdxs,
        MessageInfos:      file_proto_dbval_dbval_proto_msgTypes,
    }.Build()
    File_proto_dbval_dbval_proto = out.File
    file_proto_dbval_dbval_proto_rawDesc = nil
    file_proto_dbval_dbval_proto_goTypes = nil
    file_proto_dbval_dbval_proto_depIdxs = nil
}
14
proto/dbval/dbval.proto
Normal file
@@ -0,0 +1,14 @@
syntax = "proto3";

package ethereum.eth.dbval;

option go_package = "github.com/prysmaticlabs/prysm/v4/proto/dbval;dbval";

message BackfillStatus {
  uint64 high_slot = 1;
  bytes high_root = 2;
  uint64 low_slot = 3;
  bytes low_root = 4;
  uint64 origin_slot = 5;
  bytes origin_root = 6;
}
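For context, the generated dbval package above is what the backfill bookkeeping would read and write; the following standalone sketch (not part of this change) shows round-tripping a BackfillStatus record with the standard protobuf runtime, using made-up slot and root values:

package main

import (
    "fmt"
    "log"

    "github.com/prysmaticlabs/prysm/v4/proto/dbval"
    "google.golang.org/protobuf/proto"
)

func main() {
    // Illustrative values only; real slots and roots come from the node's origin block.
    status := &dbval.BackfillStatus{
        HighSlot:   12345,
        HighRoot:   make([]byte, 32),
        LowSlot:    1000,
        LowRoot:    make([]byte, 32),
        OriginSlot: 12345,
        OriginRoot: make([]byte, 32),
    }
    enc, err := proto.Marshal(status) // bytes suitable for a key/value store entry
    if err != nil {
        log.Fatal(err)
    }
    decoded := &dbval.BackfillStatus{}
    if err := proto.Unmarshal(enc, decoded); err != nil {
        log.Fatal(err)
    }
    fmt.Println(decoded.GetLowSlot(), decoded.GetHighSlot())
}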
@@ -740,7 +740,7 @@ message ListValidatorAssignmentsRequest {
  }
  // 48 byte validator public keys to filter assignments for the given epoch.
  repeated bytes public_keys = 3 [(ethereum.eth.ext.ssz_size) = "?,48"];
  // Validator indicies to filter assignments for the given epoch.
  // Validator indices to filter assignments for the given epoch.
  repeated uint64 indices = 4 [(ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives.ValidatorIndex"];

  // The maximum number of ValidatorAssignments to return in the response.
@@ -10,7 +10,7 @@ At the core of Ethereum Serenity lies the "Beacon Chain", a proof-of-stake based
|---------|---------|---------|-------------|
| eth | BeaconChain | v1alpha1 | This service is used to retrieve critical data relevant to the Ethereum Beacon Chain, including the most recent head block, current pending deposits, the chain state and more. |
| eth | Node | v1alpha1 | The Node service returns information about the Ethereum node itself, including versioning and general information as well as network sync status and a list of services currently implemented on the node.
| eth | Validator | v1alpha1 | This API provides the information a validator needs to retrieve throughout its lifecycle, including recieved assignments from the network, its current index in the state, as well the rewards and penalties that have been applied to it.
| eth | Validator | v1alpha1 | This API provides the information a validator needs to retrieve throughout its lifecycle, including received assignments from the network, its current index in the state, as well the rewards and penalties that have been applied to it.

### JSON Mapping
@@ -354,7 +354,11 @@ func startPProf(address string) {
    http.Handle("/memsize/", http.StripPrefix("/memsize", &Memsize))
    log.WithField("addr", fmt.Sprintf("http://%s/debug/pprof", address)).Info("Starting pprof server")
    go func() {
        if err := http.ListenAndServe(address, nil); err != nil {
        srv := &http.Server{
            Addr:              address,
            ReadHeaderTimeout: 3 * time.Second,
        }
        if err := srv.ListenAndServe(); err != nil {
            log.Error("Failure in running pprof server", "err", err)
        }
    }()
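This hunk swaps the bare http.ListenAndServe call for an explicit http.Server with a ReadHeaderTimeout, the same hardening that gosec's G114 rule flags; the tool changes further down in this diff follow the identical pattern. A minimal standalone sketch of the idea, with an illustrative address and handler:

package main

import (
    "log"
    "net/http"
    "time"
)

func main() {
    mux := http.NewServeMux()
    mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
        w.WriteHeader(http.StatusOK)
    })
    srv := &http.Server{
        Addr:              "127.0.0.1:8080", // illustrative address
        Handler:           mux,
        ReadHeaderTimeout: 3 * time.Second, // bounds clients that send headers slowly (Slowloris-style)
    }
    log.Fatal(srv.ListenAndServe())
}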
@@ -4,6 +4,7 @@ load("@prysm//tools/go:def.bzl", "go_test")
# gazelle:exclude mainnet_e2e_test.go
# gazelle:exclude mainnet_scenario_e2e_test.go
# gazelle:exclude minimal_scenario_e2e_test.go
# gazelle:exclude minimal_builder_e2e_test.go

# Presubmit tests represent the group of endtoend tests that are run on pull
# requests and must be passing before a pull request can merge.
@@ -25,6 +26,7 @@ test_suite(
        "manual",
    ],
    tests = [
        ":go_builder_test",
        ":go_mainnet_test",
    ],
)
@@ -117,6 +119,39 @@ go_test(
    deps = common_deps,
)

# gazelle:ignore
go_test(
    name = "go_builder_test",
    size = "large",
    testonly = True,
    srcs = [
        "component_handler_test.go",
        "endtoend_setup_test.go",
        "endtoend_test.go",
        "minimal_builder_e2e_test.go",
    ],
    args = ["-test.v"],
    data = [
        "//:prysm_sh",
        "//cmd/beacon-chain",
        "//cmd/validator",
        "//config/params:custom_configs",
        "//tools/bootnode",
        "@com_github_ethereum_go_ethereum//cmd/geth",
        "@web3signer",
    ],
    eth_network = "minimal",
    flaky = True,
    shard_count = 2,
    tags = [
        "e2e",
        "manual",
        "minimal",
        "requires-network",
    ],
    deps = common_deps,
)

go_test(
    name = "go_mainnet_test",
    size = "large",
@@ -28,6 +28,7 @@ type componentHandler struct {
    web3Signer  e2etypes.ComponentRunner
    bootnode    e2etypes.ComponentRunner
    eth1Miner   e2etypes.ComponentRunner
    builders    e2etypes.MultipleComponentRunners
    eth1Proxy   e2etypes.MultipleComponentRunners
    eth1Nodes   e2etypes.MultipleComponentRunners
    beaconNodes e2etypes.MultipleComponentRunners
@@ -137,23 +138,47 @@ func (c *componentHandler) setup() {
    if config.TestCheckpointSync {
        appendDebugEndpoints(config)
    }
    // Proxies
    proxies := eth1.NewProxySet()
    g.Go(func() error {
        if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Nodes}); err != nil {
            return errors.Wrap(err, "proxies require execution nodes to run")
        }
        if err := proxies.Start(ctx); err != nil {
            return errors.Wrap(err, "failed to start proxies")
        }
        return nil
    })
    c.eth1Proxy = proxies

    var builders *components.BuilderSet
    var proxies *eth1.ProxySet
    if config.UseBuilder {
        // Builder
        builders = components.NewBuilderSet()
        g.Go(func() error {
            if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Nodes}); err != nil {
                return errors.Wrap(err, "builders require execution nodes to run")
            }
            if err := builders.Start(ctx); err != nil {
                return errors.Wrap(err, "failed to start builders")
            }
            return nil
        })
        c.builders = builders
    } else {
        // Proxies
        proxies = eth1.NewProxySet()
        g.Go(func() error {
            if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Nodes}); err != nil {
                return errors.Wrap(err, "proxies require execution nodes to run")
            }
            if err := proxies.Start(ctx); err != nil {
                return errors.Wrap(err, "failed to start proxies")
            }
            return nil
        })
        c.eth1Proxy = proxies
    }

    // Beacon nodes.
    beaconNodes := components.NewBeaconNodes(config)
    g.Go(func() error {
        if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{eth1Nodes, proxies, bootNode}); err != nil {
        wantedComponents := []e2etypes.ComponentRunner{eth1Nodes, bootNode}
        if config.UseBuilder {
            wantedComponents = append(wantedComponents, builders)
        } else {
            wantedComponents = append(wantedComponents, proxies)
        }
        if err := helpers.ComponentsStarted(ctx, wantedComponents); err != nil {
            return errors.Wrap(err, "beacon nodes require proxies, execution and boot node to run")
        }
        beaconNodes.SetENR(bootNode.ENR())
@@ -215,7 +240,12 @@ func (c *componentHandler) setup() {
func (c *componentHandler) required() []e2etypes.ComponentRunner {
    multiClientActive := e2e.TestParams.LighthouseBeaconNodeCount > 0
    requiredComponents := []e2etypes.ComponentRunner{
        c.tracingSink, c.eth1Nodes, c.bootnode, c.beaconNodes, c.validatorNodes, c.eth1Proxy,
        c.tracingSink, c.eth1Nodes, c.bootnode, c.beaconNodes, c.validatorNodes,
    }
    if c.cfg.UseBuilder {
        requiredComponents = append(requiredComponents, c.builders)
    } else {
        requiredComponents = append(requiredComponents, c.eth1Proxy)
    }
    if multiClientActive {
        requiredComponents = append(requiredComponents, []e2etypes.ComponentRunner{c.keygen, c.lighthouseBeaconNodes, c.lighthouseValidatorNodes}...)
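The setup wiring above repeats one pattern: each component runs on an errgroup goroutine that first waits for its dependencies to report ready, then starts and signals its own readiness. A stripped-down sketch of that pattern with plain channels (the component names here are illustrative, not the e2e types used in this diff):

package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/errgroup"
)

type component struct {
    name    string
    started chan struct{}
}

func (c *component) start(ctx context.Context, deps ...*component) error {
    // Block until every dependency has signalled readiness, or the context ends.
    for _, d := range deps {
        select {
        case <-d.started:
        case <-ctx.Done():
            return ctx.Err()
        }
    }
    fmt.Println("starting", c.name)
    close(c.started) // signal readiness to downstream components
    return nil
}

func main() {
    g, ctx := errgroup.WithContext(context.Background())
    eth1 := &component{name: "eth1", started: make(chan struct{})}
    builder := &component{name: "builder", started: make(chan struct{})}
    beacon := &component{name: "beacon", started: make(chan struct{})}

    g.Go(func() error { return eth1.start(ctx) })
    g.Go(func() error { return builder.start(ctx, eth1) })
    g.Go(func() error { return beacon.start(ctx, eth1, builder) })
    if err := g.Wait(); err != nil {
        fmt.Println("setup failed:", err)
    }
}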
@@ -6,6 +6,7 @@ go_library(
    srcs = [
        "beacon_node.go",
        "boot_node.go",
        "builder.go",
        "lighthouse_beacon.go",
        "lighthouse_validator.go",
        "log.go",
@@ -35,6 +36,7 @@ go_library(
        "//testing/endtoend/helpers:go_default_library",
        "//testing/endtoend/params:go_default_library",
        "//testing/endtoend/types:go_default_library",
        "//testing/middleware/builder:go_default_library",
        "//validator/keymanager:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",

@@ -282,6 +282,9 @@ func (node *BeaconNode) Start(ctx context.Context) error {
    if !config.TestFeature || index%2 == 0 {
        args = append(args, features.E2EBeaconChainFlags...)
    }
    if config.UseBuilder {
        args = append(args, fmt.Sprintf("--%s=%s:%d", flags.MevRelayEndpoint.Name, "http://127.0.0.1", e2e.TestParams.Ports.Eth1ProxyPort+index))
    }
    args = append(args, config.BeaconFlags...)

    cmd := exec.CommandContext(ctx, binaryPath, args...) // #nosec G204 -- Safe
213
testing/endtoend/components/builder.go
Normal file
@@ -0,0 +1,213 @@
|
||||
package components
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/io/file"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/endtoend/helpers"
|
||||
e2e "github.com/prysmaticlabs/prysm/v4/testing/endtoend/params"
|
||||
e2etypes "github.com/prysmaticlabs/prysm/v4/testing/endtoend/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/middleware/builder"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// BuilderSet represents a set of builders for the validators running via a relay.
|
||||
type BuilderSet struct {
|
||||
e2etypes.ComponentRunner
|
||||
started chan struct{}
|
||||
builders []e2etypes.ComponentRunner
|
||||
}
|
||||
|
||||
// NewBuilderSet creates and returns a set of builders.
|
||||
func NewBuilderSet() *BuilderSet {
|
||||
return &BuilderSet{
|
||||
started: make(chan struct{}, 1),
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts all the builders in set.
|
||||
func (s *BuilderSet) Start(ctx context.Context) error {
|
||||
totalNodeCount := e2e.TestParams.BeaconNodeCount + e2e.TestParams.LighthouseBeaconNodeCount
|
||||
nodes := make([]e2etypes.ComponentRunner, totalNodeCount)
|
||||
for i := 0; i < totalNodeCount; i++ {
|
||||
nodes[i] = NewBuilder(i)
|
||||
}
|
||||
s.builders = nodes
|
||||
|
||||
// Wait for all nodes to finish their job (blocking).
|
||||
// Once nodes are ready passed in handler function will be called.
|
||||
return helpers.WaitOnNodes(ctx, nodes, func() {
|
||||
// All nodes started, close channel, so that all services waiting on a set, can proceed.
|
||||
close(s.started)
|
||||
})
|
||||
}
|
||||
|
||||
// Started checks whether builder set is started and all builders are ready to be queried.
|
||||
func (s *BuilderSet) Started() <-chan struct{} {
|
||||
return s.started
|
||||
}
|
||||
|
||||
// Pause pauses the component and its underlying process.
|
||||
func (s *BuilderSet) Pause() error {
|
||||
for _, n := range s.builders {
|
||||
if err := n.Pause(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resume resumes the component and its underlying process.
|
||||
func (s *BuilderSet) Resume() error {
|
||||
for _, n := range s.builders {
|
||||
if err := n.Resume(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the component and its underlying process.
|
||||
func (s *BuilderSet) Stop() error {
|
||||
for _, n := range s.builders {
|
||||
if err := n.Stop(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PauseAtIndex pauses the component and its underlying process at the desired index.
|
||||
func (s *BuilderSet) PauseAtIndex(i int) error {
|
||||
if i >= len(s.builders) {
|
||||
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.builders))
|
||||
}
|
||||
return s.builders[i].Pause()
|
||||
}
|
||||
|
||||
// ResumeAtIndex resumes the component and its underlying process at the desired index.
|
||||
func (s *BuilderSet) ResumeAtIndex(i int) error {
|
||||
if i >= len(s.builders) {
|
||||
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.builders))
|
||||
}
|
||||
return s.builders[i].Resume()
|
||||
}
|
||||
|
||||
// StopAtIndex stops the component and its underlying process at the desired index.
|
||||
func (s *BuilderSet) StopAtIndex(i int) error {
|
||||
if i >= len(s.builders) {
|
||||
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.builders))
|
||||
}
|
||||
return s.builders[i].Stop()
|
||||
}
|
||||
|
||||
// ComponentAtIndex returns the component at the provided index.
|
||||
func (s *BuilderSet) ComponentAtIndex(i int) (e2etypes.ComponentRunner, error) {
|
||||
if i >= len(s.builders) {
|
||||
return nil, errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.builders))
|
||||
}
|
||||
return s.builders[i], nil
|
||||
}
|
||||
|
||||
// Builder represents a block builder.
|
||||
type Builder struct {
|
||||
e2etypes.ComponentRunner
|
||||
started chan struct{}
|
||||
index int
|
||||
builder *builder.Builder
|
||||
cancel func()
|
||||
}
|
||||
|
||||
// NewBuilder creates and returns a builder.
|
||||
func NewBuilder(index int) *Builder {
|
||||
return &Builder{
|
||||
started: make(chan struct{}, 1),
|
||||
index: index,
|
||||
}
|
||||
}
|
||||
|
||||
// Start runs a builder.
|
||||
func (node *Builder) Start(ctx context.Context) error {
|
||||
f, err := os.Create(path.Join(e2e.TestParams.LogPath, "builder_"+strconv.Itoa(node.index)+".log"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
jwtPath := path.Join(e2e.TestParams.TestPath, "eth1data/"+strconv.Itoa(node.index)+"/")
|
||||
if node.index == 0 {
|
||||
jwtPath = path.Join(e2e.TestParams.TestPath, "eth1data/miner/")
|
||||
}
|
||||
jwtPath = path.Join(jwtPath, "geth/jwtsecret")
|
||||
secret, err := parseJWTSecretFromFile(jwtPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts := []builder.Option{
|
||||
builder.WithDestinationAddress(fmt.Sprintf("http://127.0.0.1:%d", e2e.TestParams.Ports.Eth1AuthRPCPort+node.index)),
|
||||
builder.WithPort(e2e.TestParams.Ports.Eth1ProxyPort + node.index),
|
||||
builder.WithLogger(logrus.New()),
|
||||
builder.WithLogFile(f),
|
||||
builder.WithJwtSecret(string(secret)),
|
||||
}
|
||||
bd, err := builder.New(opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("Starting builder %d with port: %d and file %s", node.index, e2e.TestParams.Ports.Eth1ProxyPort+node.index, f.Name())
|
||||
|
||||
// Set cancel into context.
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
node.cancel = cancel
|
||||
node.builder = bd
|
||||
// Mark node as ready.
|
||||
close(node.started)
|
||||
return bd.Start(ctx)
|
||||
}
|
||||
|
||||
// Started checks whether the builder is started and ready to be queried.
|
||||
func (node *Builder) Started() <-chan struct{} {
|
||||
return node.started
|
||||
}
|
||||
|
||||
// Pause pauses the component and its underlying process.
|
||||
func (node *Builder) Pause() error {
|
||||
// no-op
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resume resumes the component and its underlying process.
|
||||
func (node *Builder) Resume() error {
|
||||
// no-op
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop kills the component and its underlying process.
|
||||
func (node *Builder) Stop() error {
|
||||
node.cancel()
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseJWTSecretFromFile(jwtSecretFile string) ([]byte, error) {
|
||||
enc, err := file.ReadFileAsBytes(jwtSecretFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
strData := strings.TrimSpace(string(enc))
|
||||
if strData == "" {
|
||||
return nil, fmt.Errorf("provided JWT secret in file %s cannot be empty", jwtSecretFile)
|
||||
}
|
||||
secret, err := hex.DecodeString(strings.TrimPrefix(strData, "0x"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(secret) < 32 {
|
||||
return nil, errors.New("provided JWT secret should be a hex string of at least 32 bytes")
|
||||
}
|
||||
return secret, nil
|
||||
}
|
||||
@@ -46,7 +46,11 @@ func WaitForBlocks(web3 *ethclient.Client, key *keystore.Key, blocksToWait uint6
    finishBlock := block.NumberU64() + blocksToWait

    for block.NumberU64() <= finishBlock {
        spamTX := types.NewTransaction(nonce, key.Address, big.NewInt(0), params.SpamTxGasLimit, big.NewInt(1e6), []byte{})
        gasPrice, err := web3.SuggestGasPrice(context.Background())
        if err != nil {
            return err
        }
        spamTX := types.NewTransaction(nonce, key.Address, big.NewInt(0), params.SpamTxGasLimit, gasPrice, []byte{})
        signed, err := types.SignTx(spamTX, types.NewEIP155Signer(chainID), key.PrivateKey)
        if err != nil {
            return err
@@ -70,7 +70,7 @@ func (t *TransactionGenerator) Start(ctx context.Context) error {
    }
    f := filler.NewFiller(rnd)
    // Broadcast Transactions every 3 blocks
    txPeriod := 3 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
    txPeriod := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
    ticker := time.NewTicker(txPeriod)
    gasPrice := big.NewInt(1e11)
    for {
@@ -99,16 +99,22 @@ func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, f *filler.Filler
    if err != nil {
        return err
    }
    nonce, err := backend.NonceAt(context.Background(), sender, big.NewInt(-1))
    nonce, err := backend.PendingNonceAt(context.Background(), sender)
    if err != nil {
        return err
    }

    expectedPrice, err := backend.SuggestGasPrice(context.Background())
    if err != nil {
        return err
    }
    if expectedPrice.Cmp(gasPrice) > 0 {
        gasPrice = expectedPrice
    }
    g, _ := errgroup.WithContext(context.Background())
    for i := uint64(0); i < N; i++ {
        index := i
        g.Go(func() error {
            tx, err := txfuzz.RandomValidTx(client, f, sender, nonce+index, gasPrice, nil, al)
            tx, err := txfuzz.RandomValidTx(client, f, sender, nonce+index, expectedPrice, nil, al)
            if err != nil {
                // In the event the transaction constructed is not valid, we continue with the routine
                // rather than complete stop it.
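The change above replaces the hard-coded 1e6 wei gas price with a price suggested by the execution client before the spam transaction is built and signed. A self-contained sketch of that flow against go-ethereum's ethclient follows; the endpoint and the throwaway key are illustrative assumptions, not values from this change:

package main

import (
    "context"
    "log"
    "math/big"

    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/ethclient"
)

func main() {
    ctx := context.Background()
    client, err := ethclient.Dial("http://127.0.0.1:8545") // illustrative endpoint
    if err != nil {
        log.Fatal(err)
    }
    key, err := crypto.GenerateKey() // illustrative throwaway key
    if err != nil {
        log.Fatal(err)
    }
    sender := crypto.PubkeyToAddress(key.PublicKey)
    nonce, err := client.PendingNonceAt(ctx, sender)
    if err != nil {
        log.Fatal(err)
    }
    gasPrice, err := client.SuggestGasPrice(ctx) // ask the node instead of hard-coding a price
    if err != nil {
        log.Fatal(err)
    }
    // A zero-value self-transfer, signed with the EIP-155 signer for the node's chain ID.
    tx := types.NewTransaction(nonce, sender, big.NewInt(0), 21000, gasPrice, nil)
    chainID, err := client.ChainID(ctx)
    if err != nil {
        log.Fatal(err)
    }
    signed, err := types.SignTx(tx, types.NewEIP155Signer(chainID), key)
    if err != nil {
        log.Fatal(err)
    }
    if err := client.SendTransaction(ctx, signed); err != nil {
        log.Fatal(err)
    }
}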
@@ -264,6 +264,9 @@ func (v *ValidatorNode) Start(ctx context.Context) error {
            fmt.Sprintf("--%s=%d", flags.InteropStartIndex.Name, offset),
        )
    }
    if v.config.UseBuilder {
        args = append(args, fmt.Sprintf("--%s", flags.EnableBuilderFlag.Name))
    }
    args = append(args, config.ValidatorFlags...)

    if v.config.UsePrysmShValidator {

@@ -86,6 +86,9 @@ func e2eMinimal(t *testing.T, v int, cfgo ...types.E2EConfigOpt) *testRunner {
    for _, o := range cfgo {
        o(testConfig)
    }
    if testConfig.UseBuilder {
        testConfig.Evaluators = append(testConfig.Evaluators, ev.BuilderIsActive)
    }

    return newTestRunner(t, testConfig)
}
@@ -165,6 +168,9 @@ func e2eMainnet(t *testing.T, usePrysmSh, useMultiClient bool, cfg *params.Beaco
    if testConfig.UseValidatorCrossClient {
        testConfig.Evaluators = append(testConfig.Evaluators, beaconapi_evaluators.BeaconAPIMultiClientVerifyIntegrity)
    }
    if testConfig.UseBuilder {
        testConfig.Evaluators = append(testConfig.Evaluators, ev.BuilderIsActive)
    }
    return newTestRunner(t, testConfig)
}

@@ -243,7 +243,7 @@ func (r *testRunner) waitForMatchingHead(ctx context.Context, timeout time.Durat
    for {
        select {
        case <-dctx.Done():
            // deadline ensures that the test eventually exits when beacon node fails to sync in a resonable timeframe
            // deadline ensures that the test eventually exits when beacon node fails to sync in a reasonable timeframe
            elapsed := time.Since(start)
            return fmt.Errorf("deadline exceeded after %s waiting for known good block to appear in checkpoint-synced node", elapsed)
        default:
@@ -6,6 +6,7 @@ go_library(
    srcs = [
        "api_gateway_v1alpha1.go",
        "api_middleware.go",
        "builder.go",
        "data.go",
        "execution_engine.go",
        "fee_recipient.go",

103
testing/endtoend/evaluators/builder.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package evaluators
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/endtoend/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/endtoend/policies"
|
||||
e2etypes "github.com/prysmaticlabs/prysm/v4/testing/endtoend/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
// BuilderIsActive checks that the builder is indeed producing the respective payloads
|
||||
var BuilderIsActive = e2etypes.Evaluator{
|
||||
Name: "builder_is_active_at_epoch_%d",
|
||||
Policy: policies.OnwardsNthEpoch(helpers.BellatrixE2EForkEpoch),
|
||||
Evaluation: builderActive,
|
||||
}
|
||||
|
||||
func builderActive(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
|
||||
conn := conns[0]
|
||||
client := ethpb.NewNodeClient(conn)
|
||||
beaconClient := ethpb.NewBeaconChainClient(conn)
|
||||
genesis, err := client.GetGenesis(context.Background(), &emptypb.Empty{})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get genesis data")
|
||||
}
|
||||
currSlot := slots.CurrentSlot(uint64(genesis.GenesisTime.AsTime().Unix()))
|
||||
currEpoch := slots.ToEpoch(currSlot)
|
||||
lowestBound := primitives.Epoch(0)
|
||||
if currEpoch >= 1 {
|
||||
lowestBound = currEpoch - 1
|
||||
}
|
||||
|
||||
if lowestBound < helpers.BellatrixE2EForkEpoch {
|
||||
lowestBound = helpers.BellatrixE2EForkEpoch
|
||||
}
|
||||
blockCtrs, err := beaconClient.ListBeaconBlocks(context.Background(), &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: lowestBound}})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get beacon blocks")
|
||||
}
|
||||
for _, ctr := range blockCtrs.BlockContainers {
|
||||
b, err := syncCompatibleBlockFromCtr(ctr)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "block type doesn't exist for block at epoch %d", lowestBound)
|
||||
}
|
||||
|
||||
if b.IsNil() {
|
||||
return errors.New("nil block provided")
|
||||
}
|
||||
forkStartSlot, err := slots.EpochStart(helpers.BellatrixE2EForkEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if forkStartSlot == b.Block().Slot() || forkStartSlot+1 == b.Block().Slot() {
|
||||
// Skip fork slot and the next one, as we don't send FCUs yet.
|
||||
continue
|
||||
}
|
||||
execPayload, err := b.Block().Body().Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if string(execPayload.ExtraData()) != "prysm-builder" {
|
||||
return errors.Errorf("block with slot %d was not built by the builder. It has an extra data of %s", b.Block().Slot(), string(execPayload.ExtraData()))
|
||||
}
|
||||
}
|
||||
if lowestBound == currEpoch {
|
||||
return nil
|
||||
}
|
||||
blockCtrs, err = beaconClient.ListBeaconBlocks(context.Background(), &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: currEpoch}})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get validator participation")
|
||||
}
|
||||
for _, ctr := range blockCtrs.BlockContainers {
|
||||
b, err := syncCompatibleBlockFromCtr(ctr)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "block type doesn't exist for block at epoch %d", lowestBound)
|
||||
}
|
||||
if b.IsNil() {
|
||||
return errors.New("nil block provided")
|
||||
}
|
||||
forkStartSlot, err := slots.EpochStart(helpers.BellatrixE2EForkEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if forkStartSlot == b.Block().Slot() || forkStartSlot+1 == b.Block().Slot() {
|
||||
// Skip fork slot and the next one, as we don't send FCUs yet.
|
||||
continue
|
||||
}
|
||||
execPayload, err := b.Block().Body().Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if string(execPayload.ExtraData()) != "prysm-builder" {
|
||||
return errors.Errorf("block with slot %d was not built by the builder. It has an extra data of %s", b.Block().Slot(), string(execPayload.ExtraData()))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -69,7 +69,7 @@ func validatorsAreActive(ec *types.EvaluationContext, conns ...*grpc.ClientConn)
    expectedCount := params.BeaconConfig().MinGenesisActiveValidatorCount
    receivedCount := uint64(len(validators.ValidatorList))
    if expectedCount != receivedCount {
        return fmt.Errorf("expected validator count to be %d, recevied %d", expectedCount, receivedCount)
        return fmt.Errorf("expected validator count to be %d, received %d", expectedCount, receivedCount)
    }

    effBalanceLowCount := 0

13
testing/endtoend/minimal_builder_e2e_test.go
Normal file
@@ -0,0 +1,13 @@
package endtoend

import (
    "testing"

    "github.com/prysmaticlabs/prysm/v4/runtime/version"
    "github.com/prysmaticlabs/prysm/v4/testing/endtoend/types"
)

func TestEndToEnd_MinimalConfig_WithBuilder(t *testing.T) {
    r := e2eMinimal(t, version.Phase0, types.WithCheckpointSync(), types.WithBuilder())
    r.run()
}
@@ -44,6 +44,12 @@ func WithValidatorRESTApi() E2EConfigOpt {
    }
}

func WithBuilder() E2EConfigOpt {
    return func(cfg *E2EConfig) {
        cfg.UseBuilder = true
    }
}

// E2EConfig defines the struct for all configurations needed for E2E testing.
type E2EConfig struct {
    TestCheckpointSync bool
@@ -56,6 +62,7 @@ type E2EConfig struct {
    UseFixedPeerIDs         bool
    UseValidatorCrossClient bool
    UseBeaconRestApi        bool
    UseBuilder              bool
    EpochsToRun             uint64
    Seed                    int64
    TracingSinkEndpoint     string

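WithBuilder follows the same functional-option shape as the existing E2EConfigOpt helpers; the sketch below illustrates the pattern in isolation with generic names rather than the e2e types used above:

package main

import "fmt"

type Config struct {
    UseBuilder bool
    Epochs     uint64
}

// Opt mutates a Config, mirroring the E2EConfigOpt style.
type Opt func(*Config)

func WithBuilder() Opt {
    return func(c *Config) { c.UseBuilder = true }
}

func WithEpochs(n uint64) Opt {
    return func(c *Config) { c.Epochs = n }
}

func newConfig(opts ...Opt) *Config {
    c := &Config{Epochs: 10} // defaults, overridden by options
    for _, o := range opts {
        o(c)
    }
    return c
}

func main() {
    cfg := newConfig(WithBuilder(), WithEpochs(6))
    fmt.Printf("%+v\n", cfg)
}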
35
testing/middleware/builder/BUILD.bazel
Normal file
@@ -0,0 +1,35 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"builder.go",
|
||||
"options.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/testing/middleware/builder",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client/builder:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//network/authorization:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//beacon/engine:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//trie:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
659
testing/middleware/builder/builder.go
Normal file
@@ -0,0 +1,659 @@
|
||||
package builder
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/engine"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
gethTypes "github.com/ethereum/go-ethereum/core/types"
|
||||
gethRPC "github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
gMux "github.com/gorilla/mux"
|
||||
builderAPI "github.com/prysmaticlabs/prysm/v4/api/client/builder"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/math"
|
||||
"github.com/prysmaticlabs/prysm/v4/network"
|
||||
"github.com/prysmaticlabs/prysm/v4/network/authorization"
|
||||
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
statusPath = "/eth/v1/builder/status"
|
||||
registerPath = "/eth/v1/builder/validators"
|
||||
headerPath = "/eth/v1/builder/header/{slot:[0-9]+}/{parent_hash:0x[a-fA-F0-9]+}/{pubkey:0x[a-fA-F0-9]+}"
|
||||
blindedPath = "/eth/v1/builder/blinded_blocks"
|
||||
|
||||
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
|
||||
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
|
||||
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
|
||||
ForkchoiceUpdatedMethodV2 = "engine_forkchoiceUpdatedV2"
|
||||
// GetPayloadMethod v1 request string for JSON-RPC.
|
||||
GetPayloadMethod = "engine_getPayloadV1"
|
||||
// GetPayloadMethodV2 v2 request string for JSON-RPC.
|
||||
GetPayloadMethodV2 = "engine_getPayloadV2"
|
||||
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
|
||||
)
|
||||
|
||||
var (
|
||||
defaultBuilderHost = "127.0.0.1"
|
||||
defaultBuilderPort = 8551
|
||||
)
|
||||
|
||||
type jsonRPCObject struct {
|
||||
Jsonrpc string `json:"jsonrpc"`
|
||||
Method string `json:"method"`
|
||||
Params []interface{} `json:"params"`
|
||||
ID uint64 `json:"id"`
|
||||
Result interface{} `json:"result"`
|
||||
}
|
||||
|
||||
type ForkchoiceUpdatedResponse struct {
|
||||
Jsonrpc string `json:"jsonrpc"`
|
||||
Method string `json:"method"`
|
||||
Params []interface{} `json:"params"`
|
||||
ID uint64 `json:"id"`
|
||||
Result struct {
|
||||
Status *v1.PayloadStatus `json:"payloadStatus"`
|
||||
PayloadId *v1.PayloadIDBytes `json:"payloadId"`
|
||||
} `json:"result"`
|
||||
}
|
||||
|
||||
type ExecPayloadResponse struct {
|
||||
Version string `json:"version"`
|
||||
Data *v1.ExecutionPayload `json:"data"`
|
||||
}
|
||||
|
||||
type ExecHeaderResponseCapella struct {
|
||||
Version string `json:"version"`
|
||||
Data struct {
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
Message *builderAPI.BuilderBidCapella `json:"message"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type Builder struct {
|
||||
cfg *config
|
||||
address string
|
||||
execClient *gethRPC.Client
|
||||
currId *v1.PayloadIDBytes
|
||||
currPayload interfaces.ExecutionData
|
||||
mux *gMux.Router
|
||||
validatorMap map[string]*eth.ValidatorRegistrationV1
|
||||
srv *http.Server
|
||||
}
|
||||
|
||||
// New creates a proxy server forwarding requests from a consensus client to an execution client.
|
||||
func New(opts ...Option) (*Builder, error) {
|
||||
p := &Builder{
|
||||
cfg: &config{
|
||||
builderPort: defaultBuilderPort,
|
||||
builderHost: defaultBuilderHost,
|
||||
logger: logrus.New(),
|
||||
},
|
||||
}
|
||||
for _, o := range opts {
|
||||
if err := o(p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if p.cfg.destinationUrl == nil {
|
||||
return nil, errors.New("must provide a destination address for request proxying")
|
||||
}
|
||||
endpoint := network.HttpEndpoint(p.cfg.destinationUrl.String())
|
||||
endpoint.Auth.Method = authorization.Bearer
|
||||
endpoint.Auth.Value = p.cfg.secret
|
||||
execClient, err := network.NewExecutionRPCClient(context.Background(), endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/", p)
|
||||
router := gMux.NewRouter()
|
||||
router.HandleFunc(statusPath, func(writer http.ResponseWriter, request *http.Request) {
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
})
|
||||
router.HandleFunc(registerPath, p.registerValidators)
|
||||
router.HandleFunc(headerPath, p.handleHeaderRequest)
|
||||
router.HandleFunc(blindedPath, p.handleBlindedBlock)
|
||||
addr := fmt.Sprintf("%s:%d", p.cfg.builderHost, p.cfg.builderPort)
|
||||
srv := &http.Server{
|
||||
Handler: mux,
|
||||
Addr: addr,
|
||||
ReadHeaderTimeout: time.Second,
|
||||
}
|
||||
p.address = addr
|
||||
p.srv = srv
|
||||
p.execClient = execClient
|
||||
p.validatorMap = map[string]*eth.ValidatorRegistrationV1{}
|
||||
p.mux = router
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// Address for the proxy server.
|
||||
func (p *Builder) Address() string {
|
||||
return p.address
|
||||
}
|
||||
|
||||
// Start a proxy server.
|
||||
func (p *Builder) Start(ctx context.Context) error {
|
||||
p.srv.BaseContext = func(listener net.Listener) context.Context {
|
||||
return ctx
|
||||
}
|
||||
p.cfg.logger.WithFields(logrus.Fields{
|
||||
"executionAddress": p.cfg.destinationUrl.String(),
|
||||
}).Infof("Builder now listening on address %s", p.address)
|
||||
go func() {
|
||||
if err := p.srv.ListenAndServe(); err != nil {
|
||||
p.cfg.logger.Error(err)
|
||||
}
|
||||
}()
|
||||
for {
|
||||
<-ctx.Done()
|
||||
return p.srv.Shutdown(context.Background())
|
||||
}
|
||||
}
|
||||
|
||||
// ServeHTTP requests from a consensus client to an execution client, modifying in-flight requests
|
||||
// and/or responses as desired. It also processes any backed-up requests.
|
||||
func (p *Builder) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
p.cfg.logger.Infof("Received %s request from beacon with url: %s", r.Method, r.URL.Path)
|
||||
if p.isBuilderCall(r) {
|
||||
p.mux.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
requestBytes, err := parseRequestBytes(r)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not parse request")
|
||||
return
|
||||
}
|
||||
execRes, err := p.sendHttpRequest(r, requestBytes)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not forward request")
|
||||
return
|
||||
}
|
||||
p.cfg.logger.Infof("Received response for %s request with method %s from %s", r.Method, r.Method, p.cfg.destinationUrl.String())
|
||||
|
||||
defer func() {
|
||||
if err = execRes.Body.Close(); err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not do close proxy responseGen body")
|
||||
}
|
||||
}()
|
||||
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
if _, err = io.Copy(buf, execRes.Body); err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not copy proxy request body")
|
||||
return
|
||||
}
|
||||
byteResp := bytesutil.SafeCopyBytes(buf.Bytes())
|
||||
p.handleEngineCalls(requestBytes, byteResp)
|
||||
// Pipe the proxy responseGen to the original caller.
|
||||
if _, err = io.Copy(w, buf); err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not copy proxy request body")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Builder) handleEngineCalls(req, resp []byte) {
|
||||
if !isEngineAPICall(req) {
|
||||
return
|
||||
}
|
||||
rpcObj, err := unmarshalRPCObject(req)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not unmarshal rpc object")
|
||||
return
|
||||
}
|
||||
p.cfg.logger.Infof("Received engine call %s", rpcObj.Method)
|
||||
switch rpcObj.Method {
|
||||
case ForkchoiceUpdatedMethod, ForkchoiceUpdatedMethodV2:
|
||||
result := &ForkchoiceUpdatedResponse{}
|
||||
err = json.Unmarshal(resp, result)
|
||||
if err != nil {
|
||||
p.cfg.logger.Errorf("Could not unmarshal fcu: %v", err)
|
||||
return
|
||||
}
|
||||
p.currId = result.Result.PayloadId
|
||||
p.cfg.logger.Infof("Received payload id of %#x", result.Result.PayloadId)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Builder) isBuilderCall(req *http.Request) bool {
|
||||
return strings.Contains(req.URL.Path, "/eth/v1/builder/")
|
||||
}
|
||||
|
||||
func (p *Builder) registerValidators(w http.ResponseWriter, req *http.Request) {
|
||||
registrations := []builderAPI.SignedValidatorRegistration{}
|
||||
if err := json.NewDecoder(req.Body).Decode(&registrations); err != nil {
|
||||
http.Error(w, "invalid request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
for _, r := range registrations {
|
||||
msg := r.Message
|
||||
p.validatorMap[string(r.Message.Pubkey)] = msg
|
||||
}
|
||||
// TODO: Verify Signatures from validators
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (p *Builder) handleHeaderRequest(w http.ResponseWriter, req *http.Request) {
|
||||
urlParams := gMux.Vars(req)
|
||||
pHash := urlParams["parent_hash"]
|
||||
if pHash == "" {
|
||||
http.Error(w, "no valid parent hash", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
reqSlot := urlParams["slot"]
|
||||
if reqSlot == "" {
|
||||
http.Error(w, "no valid slot provided", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
slot, err := strconv.Atoi(reqSlot)
|
||||
if err != nil {
|
||||
http.Error(w, "invalid slot provided", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
ax := types.Slot(slot)
|
||||
currEpoch := types.Epoch(ax / params.BeaconConfig().SlotsPerEpoch)
|
||||
if currEpoch >= params.BeaconConfig().CapellaForkEpoch {
|
||||
p.handleHeadeRequestCapella(w)
|
||||
return
|
||||
}
|
||||
|
||||
b, err := p.retrievePendingBlock()
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not retrieve pending block")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
secKey, err := bls.RandKey()
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not retrieve secret key")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
wObj, err := blocks.WrappedExecutionPayload(b)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not wrap execution payload")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
hdr, err := blocks.PayloadToHeader(wObj)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not make payload into header")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
gEth := big.NewInt(int64(params.BeaconConfig().GweiPerEth))
|
||||
weiEth := gEth.Mul(gEth, gEth)
|
||||
val := builderAPI.Uint256{Int: weiEth}
|
||||
wrappedHdr := &builderAPI.ExecutionPayloadHeader{ExecutionPayloadHeader: hdr}
|
||||
bid := &builderAPI.BuilderBid{
|
||||
Header: wrappedHdr,
|
||||
Value: val,
|
||||
Pubkey: secKey.PublicKey().Marshal(),
|
||||
}
|
||||
sszBid := &eth.BuilderBid{
|
||||
Header: hdr,
|
||||
Value: val.SSZBytes(),
|
||||
Pubkey: secKey.PublicKey().Marshal(),
|
||||
}
|
||||
d, err := signing.ComputeDomain(params.BeaconConfig().DomainApplicationBuilder,
|
||||
nil, /* fork version */
|
||||
nil /* genesis val root */)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not compute the domain")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
rt, err := signing.ComputeSigningRoot(sszBid, d)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not compute the signing root")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
sig := secKey.Sign(rt[:])
|
||||
hdrResp := &builderAPI.ExecHeaderResponse{
|
||||
Version: "bellatrix",
|
||||
Data: struct {
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
Message *builderAPI.BuilderBid `json:"message"`
|
||||
}{
|
||||
Signature: sig.Marshal(),
|
||||
Message: bid,
|
||||
},
|
||||
}
|
||||
|
||||
err = json.NewEncoder(w).Encode(hdrResp)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not encode response")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
p.currPayload = wObj
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (p *Builder) handleHeadeRequestCapella(w http.ResponseWriter) {
|
||||
b, err := p.retrievePendingBlockCapella()
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not retrieve pending block")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
secKey, err := bls.RandKey()
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not retrieve secret key")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.Value))
|
||||
v = v.Mul(v, big.NewInt(2))
|
||||
// Is used as the helper modifies the big.Int
|
||||
weiVal := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.Value))
|
||||
weiVal = weiVal.Mul(weiVal, big.NewInt(2))
|
||||
wObj, err := blocks.WrappedExecutionPayloadCapella(b.Payload, math.WeiToGwei(weiVal))
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not wrap execution payload")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
hdr, err := blocks.PayloadToHeaderCapella(wObj)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not make payload into header")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
val := builderAPI.Uint256{Int: v}
|
||||
wrappedHdr := &builderAPI.ExecutionPayloadHeaderCapella{ExecutionPayloadHeaderCapella: hdr}
|
||||
bid := &builderAPI.BuilderBidCapella{
|
||||
Header: wrappedHdr,
|
||||
Value: val,
|
||||
Pubkey: secKey.PublicKey().Marshal(),
|
||||
}
|
||||
sszBid := &eth.BuilderBidCapella{
|
||||
Header: hdr,
|
||||
Value: val.SSZBytes(),
|
||||
Pubkey: secKey.PublicKey().Marshal(),
|
||||
}
|
||||
d, err := signing.ComputeDomain(params.BeaconConfig().DomainApplicationBuilder,
|
||||
nil, /* fork version */
|
||||
nil /* genesis val root */)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not compute the domain")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
rt, err := signing.ComputeSigningRoot(sszBid, d)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not compute the signing root")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
sig := secKey.Sign(rt[:])
|
||||
hdrResp := &ExecHeaderResponseCapella{
|
||||
Version: "capella",
|
||||
Data: struct {
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
Message *builderAPI.BuilderBidCapella `json:"message"`
|
||||
}{
|
||||
Signature: sig.Marshal(),
|
||||
Message: bid,
|
||||
},
|
||||
}
|
||||
|
||||
err = json.NewEncoder(w).Encode(hdrResp)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not encode response")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
p.currPayload = wObj
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (p *Builder) handleBlindedBlock(w http.ResponseWriter, req *http.Request) {
|
||||
sb := &builderAPI.SignedBlindedBeaconBlockBellatrix{
|
||||
SignedBlindedBeaconBlockBellatrix: &eth.SignedBlindedBeaconBlockBellatrix{},
|
||||
}
|
||||
err := json.NewDecoder(req.Body).Decode(sb)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not decode blinded block")
|
||||
// TODO: Allow the method to unmarshal blinded blocks correctly
|
||||
}
|
||||
if p.currPayload == nil {
|
||||
p.cfg.logger.Error("No payload is cached")
|
||||
http.Error(w, "payload not found", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if payload, err := p.currPayload.PbCapella(); err == nil {
|
||||
convertedPayload, err := builderAPI.FromProtoCapella(payload)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not convert the payload")
|
||||
http.Error(w, "payload not found", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
execResp := &builderAPI.ExecPayloadResponseCapella{
|
||||
Version: "capella",
|
||||
Data: convertedPayload,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(execResp)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not encode full payload response")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return
|
||||
}
|
||||
bellPayload, err := p.currPayload.PbBellatrix()
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not retrieve the payload")
|
||||
http.Error(w, "payload not found", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
convertedPayload, err := builderAPI.FromProto(bellPayload)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not convert the payload")
|
||||
http.Error(w, "payload not found", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
execResp := &builderAPI.ExecPayloadResponse{
|
||||
Version: "bellatrix",
|
||||
Data: convertedPayload,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(execResp)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not encode full payload response")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (p *Builder) retrievePendingBlock() (*v1.ExecutionPayload, error) {
|
||||
result := &engine.ExecutableData{}
|
||||
if p.currId == nil {
|
||||
return nil, errors.New("no payload id is cached")
|
||||
}
|
||||
err := p.execClient.CallContext(context.Background(), result, GetPayloadMethod, *p.currId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloadEnv, err := modifyExecutionPayload(*result, big.NewInt(0))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
marshalledOutput, err := payloadEnv.ExecutionPayload.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bellatrixPayload := &v1.ExecutionPayload{}
|
||||
if err = json.Unmarshal(marshalledOutput, bellatrixPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bellatrixPayload, nil
|
||||
}
|
||||
|
||||
func (p *Builder) retrievePendingBlockCapella() (*v1.ExecutionPayloadCapellaWithValue, error) {
|
||||
result := &engine.ExecutionPayloadEnvelope{}
|
||||
if p.currId == nil {
|
||||
return nil, errors.New("no payload id is cached")
|
||||
}
|
||||
err := p.execClient.CallContext(context.Background(), result, GetPayloadMethodV2, *p.currId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloadEnv, err := modifyExecutionPayload(*result.ExecutionPayload, result.BlockValue)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
marshalledOutput, err := payloadEnv.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
capellaPayload := &v1.ExecutionPayloadCapellaWithValue{}
|
||||
if err = json.Unmarshal(marshalledOutput, capellaPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return capellaPayload, nil
|
||||
}
|
||||
|
||||
func (p *Builder) sendHttpRequest(req *http.Request, requestBytes []byte) (*http.Response, error) {
|
||||
proxyReq, err := http.NewRequest(req.Method, p.cfg.destinationUrl.String(), req.Body)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not create new request")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set the modified request as the proxy request body.
|
||||
proxyReq.Body = ioutil.NopCloser(bytes.NewBuffer(requestBytes))
|
||||
|
||||
// Required proxy headers for forwarding JSON-RPC requests to the execution client.
|
||||
proxyReq.Header.Set("Host", req.Host)
|
||||
proxyReq.Header.Set("X-Forwarded-For", req.RemoteAddr)
|
||||
proxyReq.Header.Set("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{}
|
||||
if p.cfg.secret != "" {
|
||||
client = network.NewHttpClientWithSecret(p.cfg.secret)
|
||||
}
|
||||
proxyRes, err := client.Do(proxyReq)
|
||||
if err != nil {
|
||||
p.cfg.logger.WithError(err).Error("Could not forward request to destination server")
|
||||
return nil, err
|
||||
}
|
||||
return proxyRes, nil
|
||||
}
|
||||
|
||||
// Peek into the bytes of an HTTP request's body.
|
||||
func parseRequestBytes(req *http.Request) ([]byte, error) {
|
||||
requestBytes, err := ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = req.Body.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Body = ioutil.NopCloser(bytes.NewBuffer(requestBytes))
|
||||
return requestBytes, nil
|
||||
}
|
||||
|
||||
// Checks whether the JSON-RPC request is for the Ethereum engine API.
|
||||
func isEngineAPICall(reqBytes []byte) bool {
|
||||
jsonRequest, err := unmarshalRPCObject(reqBytes)
|
||||
if err != nil {
|
||||
switch {
|
||||
case strings.Contains(err.Error(), "cannot unmarshal array"):
|
||||
return false
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
return strings.Contains(jsonRequest.Method, "engine_")
|
||||
}
|
||||
|
||||
func unmarshalRPCObject(b []byte) (*jsonRPCObject, error) {
|
||||
r := &jsonRPCObject{}
|
||||
if err := json.Unmarshal(b, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func modifyExecutionPayload(execPayload engine.ExecutableData, fees *big.Int) (*engine.ExecutionPayloadEnvelope, error) {
|
||||
modifiedBlock, err := executableDataToBlock(execPayload)
|
||||
if err != nil {
|
||||
return &engine.ExecutionPayloadEnvelope{}, err
|
||||
}
|
||||
return engine.BlockToExecutableData(modifiedBlock, fees), nil
|
||||
}
|
||||
|
||||
// This modifies the provided payload to imprint the builder's extra data
|
||||
func executableDataToBlock(params engine.ExecutableData) (*gethTypes.Block, error) {
|
||||
txs, err := decodeTransactions(params.Transactions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Only set withdrawalsRoot if it is non-nil. This allows CLs to use
|
||||
// ExecutableData before withdrawals are enabled by marshaling
|
||||
// Withdrawals as the json null value.
|
||||
var withdrawalsRoot *common.Hash
|
||||
if params.Withdrawals != nil {
|
||||
h := gethTypes.DeriveSha(gethTypes.Withdrawals(params.Withdrawals), trie.NewStackTrie(nil))
|
||||
withdrawalsRoot = &h
|
||||
}
|
||||
header := &gethTypes.Header{
|
||||
ParentHash: params.ParentHash,
|
||||
UncleHash: gethTypes.EmptyUncleHash,
|
||||
Coinbase: params.FeeRecipient,
|
||||
Root: params.StateRoot,
|
||||
TxHash: gethTypes.DeriveSha(gethTypes.Transactions(txs), trie.NewStackTrie(nil)),
|
||||
ReceiptHash: params.ReceiptsRoot,
|
||||
Bloom: gethTypes.BytesToBloom(params.LogsBloom),
|
||||
Difficulty: common.Big0,
|
||||
Number: new(big.Int).SetUint64(params.Number),
|
||||
GasLimit: params.GasLimit,
|
||||
GasUsed: params.GasUsed,
|
||||
Time: params.Timestamp,
|
||||
BaseFee: params.BaseFeePerGas,
|
||||
Extra: []byte("prysm-builder"), // add in extra data
|
||||
MixDigest: params.Random,
|
||||
WithdrawalsHash: withdrawalsRoot,
|
||||
}
|
||||
block := gethTypes.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */).WithWithdrawals(params.Withdrawals)
|
||||
return block, nil
|
||||
}
|
||||
|
||||
func decodeTransactions(enc [][]byte) ([]*gethTypes.Transaction, error) {
|
||||
var txs = make([]*gethTypes.Transaction, len(enc))
|
||||
for i, encTx := range enc {
|
||||
var tx gethTypes.Transaction
|
||||
if err := tx.UnmarshalBinary(encTx); err != nil {
|
||||
return nil, fmt.Errorf("invalid transaction %d: %v", i, err)
|
||||
}
|
||||
txs[i] = &tx
|
||||
}
|
||||
return txs, nil
|
||||
}
|
||||
79
testing/middleware/builder/options.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package builder

import (
	"net/url"
	"os"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

type config struct {
	builderPort    int
	builderHost    string
	destinationUrl *url.URL
	logger         *logrus.Logger
	secret         string
}

type Option func(p *Builder) error

// WithHost sets the proxy server host.
func WithHost(host string) Option {
	return func(p *Builder) error {
		p.cfg.builderHost = host
		return nil
	}
}

// WithPort sets the proxy server port.
func WithPort(port int) Option {
	return func(p *Builder) error {
		p.cfg.builderPort = port
		return nil
	}
}

// WithDestinationAddress sets the forwarding address requests will be sent to.
func WithDestinationAddress(addr string) Option {
	return func(p *Builder) error {
		if addr == "" {
			return errors.New("must provide a destination address for builder")
		}
		u, err := url.Parse(addr)
		if err != nil {
			return errors.Wrapf(err, "could not parse URL for destination address: %s", addr)
		}
		p.cfg.destinationUrl = u
		return nil
	}
}

// WithJwtSecret adds support for JWT-authenticated
// connections for our proxy.
func WithJwtSecret(secret string) Option {
	return func(p *Builder) error {
		p.cfg.secret = secret
		return nil
	}
}

// WithLogger sets a custom logger for the proxy.
func WithLogger(l *logrus.Logger) Option {
	return func(p *Builder) error {
		p.cfg.logger = l
		return nil
	}
}

// WithLogFile specifies a log file to write
// the proxy's output to.
func WithLogFile(f *os.File) Option {
	return func(p *Builder) error {
		if p.cfg.logger == nil {
			return errors.New("nil logger provided")
		}
		p.cfg.logger.SetOutput(f)
		return nil
	}
}
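To show how these options are meant to compose, here is a small sketch of the functional-options pattern they implement. The New constructor and the Builder's cfg field layout below are assumptions made for illustration; the actual constructor lives elsewhere in this package and may differ.

// Hypothetical constructor, shown only to illustrate how the Option
// functions above are applied; the real builder constructor may differ.
func New(opts ...Option) (*Builder, error) {
	b := &Builder{
		cfg: &config{ // assumed defaults, for illustration only
			builderHost: "127.0.0.1",
			builderPort: 8550,
			logger:      logrus.New(),
		},
	}
	for _, o := range opts {
		if err := o(b); err != nil {
			return nil, err
		}
	}
	return b, nil
}

// Example call site (illustrative):
//   b, err := New(WithPort(8551), WithDestinationAddress("http://127.0.0.1:8545"))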
@@ -108,7 +108,7 @@ func (s *Simulator) generateAttestationsForSlot(
 	}
 }
 
 func (s *Simulator) aggregateSigForAttestation(
-	beaconState state.BeaconState, att *ethpb.IndexedAttestation,
+	beaconState state.ReadOnlyBeaconState, att *ethpb.IndexedAttestation,
 ) (bls.Signature, error) {
 	domain, err := signing.Domain(
 		beaconState.Fork(),
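The change above narrows the parameter from the full beacon state to a read-only view. As a generic illustration of why accepting the smaller, read-only interface is useful (this sketch does not use prysm's actual types), see below.

package main

import "fmt"

// ReadOnlyState is the minimal view a consumer needs.
type ReadOnlyState interface {
	Slot() uint64
}

// MutableState extends the read-only view with setters.
type MutableState interface {
	ReadOnlyState
	SetSlot(uint64)
}

type beaconState struct{ slot uint64 }

func (b *beaconState) Slot() uint64     { return b.slot }
func (b *beaconState) SetSlot(s uint64) { b.slot = s }

// describe accepts the smaller interface, so it works with read-only views
// and the compiler guarantees it cannot mutate the state.
func describe(st ReadOnlyState) string {
	return fmt.Sprintf("state at slot %d", st.Slot())
}

func main() {
	var st MutableState = &beaconState{slot: 42}
	fmt.Println(describe(st)) // a MutableState satisfies ReadOnlyState as well
}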
4
third_party/usb/usb_disabled.go
vendored
@@ -16,7 +16,7 @@
 package usb
 
 // Supported returns whether this platform is supported by the USB library or not.
-// The goal of this method is to allow programatically handling platforms that do
+// The goal of this method is to allow programmatically handling platforms that do
 // not support USB and not having to fall back to build constraints.
 func Supported() bool {
 	return false
@@ -43,7 +43,7 @@ func EnumerateHid(vendorID uint16, productID uint16) ([]DeviceInfo, error) {
 	return nil, nil
 }
 
-// Open connects to a previsouly discovered USB device.
+// Open connects to a previously discovered USB device.
 func (info DeviceInfo) Open() (Device, error) {
 	return nil, ErrUnsupportedPlatform
 }
@@ -114,7 +114,13 @@ func main() {
 	mux := http.NewServeMux()
 	mux.HandleFunc("/p2p", handler.httpHandler)
 
-	if err := http.ListenAndServe(fmt.Sprintf(":%d", *metricsPort), mux); err != nil {
+	srv := &http.Server{
+		Addr:              fmt.Sprintf(":%d", *metricsPort),
+		ReadHeaderTimeout: 3 * time.Second,
+		Handler:           mux,
+	}
+
+	if err := srv.ListenAndServe(); err != nil {
 		log.WithError(err).Fatal("Failed to start server")
 	}
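This hunk, and the two similar ones below, replace a bare http.ListenAndServe call with an explicit http.Server so that ReadHeaderTimeout can be set; serve helpers that never time out header reads are a Slowloris vector and are flagged by gosec. As background, here is a minimal, self-contained sketch of the pattern; the address, route, and handler are illustrative, and only the ReadHeaderTimeout value mirrors the diff.

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "ok")
	})

	srv := &http.Server{
		Addr:              ":8080",         // illustrative port
		ReadHeaderTimeout: 3 * time.Second, // bounds how long a client may take to send headers
		Handler:           mux,
	}
	log.Fatal(srv.ListenAndServe())
}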
@@ -76,7 +76,11 @@ func main() {
 
 	http.HandleFunc("/metrics", MetricsHTTP)
 	http.HandleFunc("/reload", ReloadHTTP)
-	log.Fatal(http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", *port), nil))
+	srv := &http.Server{
+		Addr:              fmt.Sprintf("127.0.0.1:%d", *port),
+		ReadHeaderTimeout: 3 * time.Second,
+	}
+	log.Fatal(srv.ListenAndServe())
 }
 
 // Watching address wrapper
@@ -78,7 +78,7 @@ func displayHeads(clients map[string]pb.BeaconChainClient) {
 	}
 }
 
-// compare heads between all RPC end points, log the missmatch if there's one.
+// compare heads between all RPC end points, log the mismatch if there's one.
 func compareHeads(clients map[string]pb.BeaconChainClient) {
 	endpt1 := randomEndpt(clients)
 	head1, err := clients[endpt1].GetChainHead(context.Background(), &emptypb.Empty{})
@@ -101,7 +101,7 @@ func compareHeads(clients map[string]pb.BeaconChainClient) {
 		log.Fatal(err)
 	}
 	if !reflect.DeepEqual(head1, head2) {
-		log.Error("Uh oh! Heads missmatched!")
+		log.Error("Uh oh! Heads mismatched!")
 		logHead(endpt1, head1)
 		logHead(endpt2, head2)
@@ -13,6 +13,7 @@ import (
 	"os"
 	"path/filepath"
 	"strconv"
+	"time"
 
 	"github.com/prysmaticlabs/prysm/v4/config/params"
 )
@@ -51,7 +52,11 @@ func main() {
 		}
 	})
 	log.Printf("Listening on port %d", *port)
-	log.Fatal(http.ListenAndServe(":"+strconv.Itoa(*port), nil))
+	srv := &http.Server{
+		Addr:              ":" + strconv.Itoa(*port),
+		ReadHeaderTimeout: 3 * time.Second,
+	}
+	log.Fatal(srv.ListenAndServe())
 }
 
 func captureRequest(f *os.File, m map[string]interface{}) error {
@@ -12,7 +12,7 @@ import (
 
 var (
 	numKeys    = flag.Int("num-keys", 0, "Number of validator private/withdrawal keys to generate")
-	startIndex = flag.Uint64("start-index", 0, "Start index for the determinstic keygen algorithm")
+	startIndex = flag.Uint64("start-index", 0, "Start index for the deterministic keygen algorithm")
 	random     = flag.Bool("random", false, "Randomly generate keys")
 	outputJSON = flag.String("output-json", "", "JSON file to write output to")
 	overwrite  = flag.Bool("overwrite", false, "If the key file exists, it will be overwritten")
@@ -31,7 +31,7 @@ const (
 	RoleAggregator
 	// RoleSyncCommittee means that the validator should submit a sync committee message.
 	RoleSyncCommittee
-	// RoleSyncCommitteeAggregator means the valiator should aggregate sync committee messages and submit a sync committee contribution.
+	// RoleSyncCommitteeAggregator means the validator should aggregate sync committee messages and submit a sync committee contribution.
 	RoleSyncCommitteeAggregator
 )
Some files were not shown because too many files have changed in this diff.