mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-11 14:28:09 -05:00
Compare commits
1 Commits
use-method
...
fix-proces
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5ed16d9526 |
12
BUILD.bazel
12
BUILD.bazel
@@ -1,4 +1,3 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@bazel_gazelle//:def.bzl", "gazelle")
|
||||
load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
|
||||
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
|
||||
@@ -287,14 +286,3 @@ sh_binary(
|
||||
srcs = ["prysm.sh"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["derp.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
@@ -194,10 +195,18 @@ func ProcessConsolidations(ctx context.Context, st state.BeaconState, cs []*ethp
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !helpers.IsActiveValidator(source, currentEpoch) {
|
||||
sourceValidator, err := state_native.NewValidator(source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !helpers.IsActiveValidator(sourceValidator, currentEpoch) {
|
||||
return errors.New("source is not active")
|
||||
}
|
||||
if !helpers.IsActiveValidator(target, currentEpoch) {
|
||||
targetValidator, err := state_native.NewValidator(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !helpers.IsActiveValidator(targetValidator, currentEpoch) {
|
||||
return errors.New("target is not active")
|
||||
}
|
||||
if source.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -123,7 +124,12 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
|
||||
}
|
||||
|
||||
// Verify the validator is active.
|
||||
if !helpers.IsActiveValidator(validator, currentEpoch) {
|
||||
roValidator, err := state_native.NewValidator(validator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !helpers.IsActiveValidator(roValidator, currentEpoch) {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator not active")
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ go_library(
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stateutil"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -28,7 +29,7 @@ import (
|
||||
// by activation epoch and by index number.
|
||||
type sortableIndices struct {
|
||||
indices []primitives.ValidatorIndex
|
||||
validators []*ethpb.Validator
|
||||
validators []state.ReadOnlyValidator
|
||||
}
|
||||
|
||||
// Len is the number of elements in the collection.
|
||||
@@ -39,10 +40,10 @@ func (s sortableIndices) Swap(i, j int) { s.indices[i], s.indices[j] = s.indices
|
||||
|
||||
// Less reports whether the element with index i must sort before the element with index j.
|
||||
func (s sortableIndices) Less(i, j int) bool {
|
||||
if s.validators[s.indices[i]].ActivationEligibilityEpoch == s.validators[s.indices[j]].ActivationEligibilityEpoch {
|
||||
if s.validators[s.indices[i]].ActivationEligibilityEpoch() == s.validators[s.indices[j]].ActivationEligibilityEpoch() {
|
||||
return s.indices[i] < s.indices[j]
|
||||
}
|
||||
return s.validators[s.indices[i]].ActivationEligibilityEpoch < s.validators[s.indices[j]].ActivationEligibilityEpoch
|
||||
return s.validators[s.indices[i]].ActivationEligibilityEpoch() < s.validators[s.indices[j]].ActivationEligibilityEpoch()
|
||||
}
|
||||
|
||||
// AttestingBalance returns the total balance from all the attesting indices.
|
||||
@@ -93,22 +94,30 @@ func AttestingBalance(ctx context.Context, state state.ReadOnlyBeaconState, atts
|
||||
// validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
|
||||
func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
|
||||
currentEpoch := time.CurrentEpoch(state)
|
||||
vals := state.Validators()
|
||||
vals := state.ValidatorsReadOnly()
|
||||
var err error
|
||||
ejectionBal := params.BeaconConfig().EjectionBalance
|
||||
activationEligibilityEpoch := time.CurrentEpoch(state) + 1
|
||||
for idx, validator := range vals {
|
||||
// Process the validators for activation eligibility.
|
||||
if helpers.IsEligibleForActivationQueue(validator, currentEpoch) {
|
||||
validator.ActivationEligibilityEpoch = activationEligibilityEpoch
|
||||
if err := state.UpdateValidatorAtIndex(primitives.ValidatorIndex(idx), validator); err != nil {
|
||||
v, err := state.ValidatorAtIndex(primitives.ValidatorIndex(idx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v.ActivationEligibilityEpoch = activationEligibilityEpoch
|
||||
if err := state.UpdateValidatorAtIndex(primitives.ValidatorIndex(idx), v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
validator, err = state_native.NewValidator(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Process the validators for ejection.
|
||||
isActive := helpers.IsActiveValidator(validator, currentEpoch)
|
||||
belowEjectionBalance := validator.EffectiveBalance <= ejectionBal
|
||||
belowEjectionBalance := validator.EffectiveBalance() <= ejectionBal
|
||||
if isActive && belowEjectionBalance {
|
||||
// Here is fine to do a quadratic loop since this should
|
||||
// barely happen
|
||||
@@ -122,6 +131,7 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
|
||||
|
||||
// Queue validators eligible for activation and not yet dequeued for activation.
|
||||
var activationQ []primitives.ValidatorIndex
|
||||
vals = state.ValidatorsReadOnly()
|
||||
for idx, validator := range vals {
|
||||
if helpers.IsEligibleForActivation(state, validator) {
|
||||
activationQ = append(activationQ, primitives.ValidatorIndex(idx))
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
@@ -92,14 +91,6 @@ func IsAggregated(attestation ethpb.Att) bool {
|
||||
//
|
||||
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
|
||||
func ComputeSubnetForAttestation(activeValCount uint64, att ethpb.Att) uint64 {
|
||||
if att.Version() >= version.Electra {
|
||||
committeeIndex := 0
|
||||
committeeIndices := att.CommitteeBitsVal().BitIndices()
|
||||
if len(committeeIndices) > 0 {
|
||||
committeeIndex = committeeIndices[0]
|
||||
}
|
||||
return ComputeSubnetFromCommitteeAndSlot(activeValCount, primitives.CommitteeIndex(committeeIndex), att.GetData().Slot)
|
||||
}
|
||||
return ComputeSubnetFromCommitteeAndSlot(activeValCount, att.GetData().CommitteeIndex, att.GetData().Slot)
|
||||
}
|
||||
|
||||
|
||||
@@ -73,37 +73,21 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
valCount, err := helpers.ActiveValidatorCount(context.Background(), state, slots.ToEpoch(34))
|
||||
att := ðpb.Attestation{
|
||||
AggregationBits: []byte{'A'},
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 34,
|
||||
CommitteeIndex: 4,
|
||||
BeaconBlockRoot: []byte{'C'},
|
||||
Source: nil,
|
||||
Target: nil,
|
||||
},
|
||||
Signature: []byte{'B'},
|
||||
}
|
||||
valCount, err := helpers.ActiveValidatorCount(context.Background(), state, slots.ToEpoch(att.Data.Slot))
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
att := ðpb.Attestation{
|
||||
AggregationBits: []byte{'A'},
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 34,
|
||||
CommitteeIndex: 4,
|
||||
BeaconBlockRoot: []byte{'C'},
|
||||
},
|
||||
Signature: []byte{'B'},
|
||||
}
|
||||
sub := helpers.ComputeSubnetForAttestation(valCount, att)
|
||||
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
|
||||
})
|
||||
t.Run("Electra", func(t *testing.T) {
|
||||
cb := primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(4, true)
|
||||
att := ðpb.AttestationElectra{
|
||||
AggregationBits: []byte{'A'},
|
||||
CommitteeBits: cb,
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 34,
|
||||
BeaconBlockRoot: []byte{'C'},
|
||||
},
|
||||
Signature: []byte{'B'},
|
||||
}
|
||||
sub := helpers.ComputeSubnetForAttestation(valCount, att)
|
||||
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
|
||||
})
|
||||
sub := helpers.ComputeSubnetForAttestation(valCount, att)
|
||||
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
|
||||
}
|
||||
|
||||
func Test_ValidateAttestationTime(t *testing.T) {
|
||||
|
||||
@@ -41,8 +41,8 @@ var (
|
||||
// Check if ``validator`` is active.
|
||||
// """
|
||||
// return validator.activation_epoch <= epoch < validator.exit_epoch
|
||||
func IsActiveValidator(validator *ethpb.Validator, epoch primitives.Epoch) bool {
|
||||
return checkValidatorActiveStatus(validator.ActivationEpoch, validator.ExitEpoch, epoch)
|
||||
func IsActiveValidator(validator state.ReadOnlyValidator, epoch primitives.Epoch) bool {
|
||||
return checkValidatorActiveStatus(validator.ActivationEpoch(), validator.ExitEpoch(), epoch)
|
||||
}
|
||||
|
||||
// IsActiveValidatorUsingTrie checks if a read only validator is active.
|
||||
@@ -404,11 +404,11 @@ func ComputeProposerIndex(bState state.ReadOnlyValidators, activeIndices []primi
|
||||
// validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH
|
||||
// and validator.effective_balance >= MIN_ACTIVATION_BALANCE # [Modified in Electra:EIP7251]
|
||||
// )
|
||||
func IsEligibleForActivationQueue(validator *ethpb.Validator, currentEpoch primitives.Epoch) bool {
|
||||
func IsEligibleForActivationQueue(validator state.ReadOnlyValidator, currentEpoch primitives.Epoch) bool {
|
||||
if currentEpoch >= params.BeaconConfig().ElectraForkEpoch {
|
||||
return isEligibleForActivationQueueElectra(validator.ActivationEligibilityEpoch, validator.EffectiveBalance)
|
||||
return isEligibleForActivationQueueElectra(validator.ActivationEligibilityEpoch(), validator.EffectiveBalance())
|
||||
}
|
||||
return isEligibleForActivationQueue(validator.ActivationEligibilityEpoch, validator.EffectiveBalance)
|
||||
return isEligibleForActivationQueue(validator.ActivationEligibilityEpoch(), validator.EffectiveBalance())
|
||||
}
|
||||
|
||||
// isEligibleForActivationQueue carries out the logic for IsEligibleForActivationQueue
|
||||
@@ -459,9 +459,9 @@ func isEligibleForActivationQueueElectra(activationEligibilityEpoch primitives.E
|
||||
// # Has not yet been activated
|
||||
// and validator.activation_epoch == FAR_FUTURE_EPOCH
|
||||
// )
|
||||
func IsEligibleForActivation(state state.ReadOnlyCheckpoint, validator *ethpb.Validator) bool {
|
||||
func IsEligibleForActivation(state state.ReadOnlyCheckpoint, validator state.ReadOnlyValidator) bool {
|
||||
finalizedEpoch := state.FinalizedCheckpointEpoch()
|
||||
return isEligibleForActivation(validator.ActivationEligibilityEpoch, validator.ActivationEpoch, finalizedEpoch)
|
||||
return isEligibleForActivation(validator.ActivationEligibilityEpoch(), validator.ActivationEpoch(), finalizedEpoch)
|
||||
}
|
||||
|
||||
// IsEligibleForActivationUsingTrie checks if the validator is eligible for activation.
|
||||
|
||||
@@ -69,6 +69,7 @@ go_library(
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
fastssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -196,3 +197,7 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
|
||||
" Default fee recipient will be used as a fall back", checksumAddress.Hex())
|
||||
return params.SetActive(c)
|
||||
}
|
||||
|
||||
func configureFastSSZHashingAlgorithm() {
|
||||
fastssz.EnableVectorizedHTR = true
|
||||
}
|
||||
|
||||
@@ -277,6 +277,8 @@ func configureBeacon(cliCtx *cli.Context) error {
|
||||
return errors.Wrap(err, "could not configure execution setting")
|
||||
}
|
||||
|
||||
configureFastSSZHashingAlgorithm()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -27,8 +27,7 @@ var gossipTopicMappings = map[string]proto.Message{
|
||||
// GossipTopicMappings is a function to return the assigned data type
|
||||
// versioned by epoch.
|
||||
func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
|
||||
switch topic {
|
||||
case BlockSubnetTopicFormat:
|
||||
if topic == BlockSubnetTopicFormat {
|
||||
if epoch >= params.BeaconConfig().ElectraForkEpoch {
|
||||
return ðpb.SignedBeaconBlockElectra{}
|
||||
}
|
||||
@@ -44,25 +43,8 @@ func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
|
||||
if epoch >= params.BeaconConfig().AltairForkEpoch {
|
||||
return ðpb.SignedBeaconBlockAltair{}
|
||||
}
|
||||
return gossipTopicMappings[topic]
|
||||
case AttestationSubnetTopicFormat:
|
||||
if epoch >= params.BeaconConfig().ElectraForkEpoch {
|
||||
return ðpb.AttestationElectra{}
|
||||
}
|
||||
return gossipTopicMappings[topic]
|
||||
case AttesterSlashingSubnetTopicFormat:
|
||||
if epoch >= params.BeaconConfig().ElectraForkEpoch {
|
||||
return ðpb.AttesterSlashingElectra{}
|
||||
}
|
||||
return gossipTopicMappings[topic]
|
||||
case AggregateAndProofSubnetTopicFormat:
|
||||
if epoch >= params.BeaconConfig().ElectraForkEpoch {
|
||||
return ðpb.SignedAggregateAttestationAndProofElectra{}
|
||||
}
|
||||
return gossipTopicMappings[topic]
|
||||
default:
|
||||
return gossipTopicMappings[topic]
|
||||
}
|
||||
return gossipTopicMappings[topic]
|
||||
}
|
||||
|
||||
// AllTopics returns all topics stored in our
|
||||
@@ -93,7 +75,4 @@ func init() {
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlockDeneb{})] = BlockSubnetTopicFormat
|
||||
// Specially handle Electra objects.
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlockElectra{})] = BlockSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.AttestationElectra{})] = AttestationSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.AttesterSlashingElectra{})] = AttesterSlashingSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedAggregateAttestationAndProofElectra{})] = AggregateAndProofSubnetTopicFormat
|
||||
}
|
||||
|
||||
@@ -22,20 +22,20 @@ func TestMappingHasNoDuplicates(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
func TestGossipTopicMappings_CorrectBlockType(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bCfg := params.BeaconConfig().Copy()
|
||||
altairForkEpoch := primitives.Epoch(100)
|
||||
bellatrixForkEpoch := primitives.Epoch(200)
|
||||
capellaForkEpoch := primitives.Epoch(300)
|
||||
denebForkEpoch := primitives.Epoch(400)
|
||||
electraForkEpoch := primitives.Epoch(500)
|
||||
BellatrixForkEpoch := primitives.Epoch(200)
|
||||
CapellaForkEpoch := primitives.Epoch(300)
|
||||
DenebForkEpoch := primitives.Epoch(400)
|
||||
ElectraForkEpoch := primitives.Epoch(500)
|
||||
|
||||
bCfg.AltairForkEpoch = altairForkEpoch
|
||||
bCfg.BellatrixForkEpoch = bellatrixForkEpoch
|
||||
bCfg.CapellaForkEpoch = capellaForkEpoch
|
||||
bCfg.DenebForkEpoch = denebForkEpoch
|
||||
bCfg.ElectraForkEpoch = electraForkEpoch
|
||||
bCfg.BellatrixForkEpoch = BellatrixForkEpoch
|
||||
bCfg.CapellaForkEpoch = CapellaForkEpoch
|
||||
bCfg.DenebForkEpoch = DenebForkEpoch
|
||||
bCfg.ElectraForkEpoch = ElectraForkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = primitives.Epoch(100)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.BellatrixForkVersion)] = primitives.Epoch(200)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.CapellaForkVersion)] = primitives.Epoch(300)
|
||||
@@ -47,83 +47,29 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
pMessage := GossipTopicMappings(BlockSubnetTopicFormat, 0)
|
||||
_, ok := pMessage.(*ethpb.SignedBeaconBlock)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, 0)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, 0)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, 0)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Altair Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockAltair)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Bellatrix Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, bellatrixForkEpoch)
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, BellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockBellatrix)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Capella Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, capellaForkEpoch)
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, CapellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockCapella)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Deneb Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, denebForkEpoch)
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, DenebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockDeneb)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Electra Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, electraForkEpoch)
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, ElectraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttestationElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashingElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProofElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
@@ -43,7 +43,6 @@ go_test(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
|
||||
@@ -26,13 +26,7 @@ var (
|
||||
BlockMap map[[4]byte]func() (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
// MetaDataMap maps the fork-version to the underlying data type for that
|
||||
// particular fork period.
|
||||
MetaDataMap map[[4]byte]func() (metadata.Metadata, error)
|
||||
// AttestationMap maps the fork-version to the underlying data type for that
|
||||
// particular fork period.
|
||||
AttestationMap map[[4]byte]func() (ethpb.Att, error)
|
||||
// AggregateAttestationMap maps the fork-version to the underlying data type for that
|
||||
// particular fork period.
|
||||
AggregateAttestationMap map[[4]byte]func() (ethpb.SignedAggregateAttAndProof, error)
|
||||
MetaDataMap map[[4]byte]func() metadata.Metadata
|
||||
)
|
||||
|
||||
// InitializeDataMaps initializes all the relevant object maps. This function is called to
|
||||
@@ -73,68 +67,24 @@ func InitializeDataMaps() {
|
||||
}
|
||||
|
||||
// Reset our metadata map.
|
||||
MetaDataMap = map[[4]byte]func() (metadata.Metadata, error){
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV0(ðpb.MetaDataV0{}), nil
|
||||
MetaDataMap = map[[4]byte]func() metadata.Metadata{
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV0(ðpb.MetaDataV0{})
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
},
|
||||
}
|
||||
|
||||
// Reset our attestation map.
|
||||
AttestationMap = map[[4]byte]func() (ethpb.Att, error){
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.AttestationElectra{}, nil
|
||||
},
|
||||
}
|
||||
|
||||
// Reset our aggregate attestation map.
|
||||
AggregateAttestationMap = map[[4]byte]func() (ethpb.SignedAggregateAttAndProof, error){
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProofElectra{}, nil
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,9 +5,7 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestInitializeDataMaps(t *testing.T) {
|
||||
@@ -46,36 +44,8 @@ func TestInitializeDataMaps(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.action()
|
||||
bFunc, ok := BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
_, ok := BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
assert.Equal(t, tt.exists, ok)
|
||||
if tt.exists {
|
||||
b, err := bFunc()
|
||||
require.NoError(t, err)
|
||||
generic, err := b.PbGenericBlock()
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, generic.GetPhase0())
|
||||
}
|
||||
mdFunc, ok := MetaDataMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
if tt.exists {
|
||||
md, err := mdFunc()
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, md.MetadataObjV0())
|
||||
}
|
||||
assert.Equal(t, tt.exists, ok)
|
||||
attFunc, ok := AttestationMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
if tt.exists {
|
||||
att, err := attFunc()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, version.Phase0, att.Version())
|
||||
}
|
||||
assert.Equal(t, tt.exists, ok)
|
||||
aggFunc, ok := AggregateAttestationMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
assert.Equal(t, tt.exists, ok)
|
||||
if tt.exists {
|
||||
agg, err := aggFunc()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, version.Phase0, agg.Version())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -36,6 +36,7 @@ go_library(
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//cmd:go_default_library",
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -307,7 +308,11 @@ func (bs *Server) ListValidators(
|
||||
if req.Active {
|
||||
filteredValidators := make([]*ethpb.Validators_ValidatorContainer, 0)
|
||||
for _, item := range validatorList {
|
||||
if helpers.IsActiveValidator(item.Validator, requestedEpoch) {
|
||||
roVal, err := state_native.NewValidator(item.Validator)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not create read-only validator: %v", err)
|
||||
}
|
||||
if helpers.IsActiveValidator(roVal, requestedEpoch) {
|
||||
filteredValidators = append(filteredValidators, item)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ go_library(
|
||||
"pending_consolidations_root.go",
|
||||
"pending_partial_withdrawals_root.go",
|
||||
"reference.go",
|
||||
"slice_root.go",
|
||||
"sync_committee.root.go",
|
||||
"trie_helpers.go",
|
||||
"unrealized_justification.go",
|
||||
|
||||
@@ -2,10 +2,9 @@ package stateutil
|
||||
|
||||
import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func HistoricalSummariesRoot(summaries []*ethpb.HistoricalSummary) ([32]byte, error) {
|
||||
return ssz.SliceRoot(summaries, fieldparams.HistoricalRootsLength)
|
||||
return SliceRoot(summaries, fieldparams.HistoricalRootsLength)
|
||||
}
|
||||
|
||||
@@ -2,10 +2,9 @@ package stateutil
|
||||
|
||||
import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func PendingBalanceDepositsRoot(slice []*ethpb.PendingBalanceDeposit) ([32]byte, error) {
|
||||
return ssz.SliceRoot(slice, fieldparams.PendingBalanceDepositsLimit)
|
||||
return SliceRoot(slice, fieldparams.PendingBalanceDepositsLimit)
|
||||
}
|
||||
|
||||
@@ -2,10 +2,9 @@ package stateutil
|
||||
|
||||
import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func PendingConsolidationsRoot(slice []*ethpb.PendingConsolidation) ([32]byte, error) {
|
||||
return ssz.SliceRoot(slice, fieldparams.PendingConsolidationsLimit)
|
||||
return SliceRoot(slice, fieldparams.PendingConsolidationsLimit)
|
||||
}
|
||||
|
||||
@@ -2,10 +2,9 @@ package stateutil
|
||||
|
||||
import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func PendingPartialWithdrawalsRoot(slice []*ethpb.PendingPartialWithdrawal) ([32]byte, error) {
|
||||
return ssz.SliceRoot(slice, fieldparams.PendingPartialWithdrawalsLimit)
|
||||
return SliceRoot(slice, fieldparams.PendingPartialWithdrawalsLimit)
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package ssz
|
||||
package stateutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -6,10 +6,11 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
)
|
||||
|
||||
// SliceRoot computes the root of a slice of hashable objects.
|
||||
func SliceRoot[T Hashable](slice []T, limit uint64) ([32]byte, error) {
|
||||
func SliceRoot[T ssz.Hashable](slice []T, limit uint64) ([32]byte, error) {
|
||||
max := limit
|
||||
if uint64(len(slice)) > max {
|
||||
return [32]byte{}, fmt.Errorf("slice exceeds max length %d", max)
|
||||
@@ -24,7 +25,7 @@ func SliceRoot[T Hashable](slice []T, limit uint64) ([32]byte, error) {
|
||||
roots[i] = r
|
||||
}
|
||||
|
||||
sliceRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), limit)
|
||||
sliceRoot, err := ssz.BitwiseMerkleize(roots, uint64(len(roots)), limit)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not slice merkleization")
|
||||
}
|
||||
@@ -35,5 +36,6 @@ func SliceRoot[T Hashable](slice []T, limit uint64) ([32]byte, error) {
|
||||
// We need to mix in the length of the slice.
|
||||
sliceLenRoot := make([]byte, 32)
|
||||
copy(sliceLenRoot, sliceLenBuf.Bytes())
|
||||
return MixInLength(sliceRoot, sliceLenRoot), nil
|
||||
res := ssz.MixInLength(sliceRoot, sliceLenRoot)
|
||||
return res, nil
|
||||
}
|
||||
@@ -44,7 +44,6 @@ go_library(
|
||||
"validate_aggregate_proof.go",
|
||||
"validate_attester_slashing.go",
|
||||
"validate_beacon_attestation.go",
|
||||
"validate_beacon_attestation_electra.go",
|
||||
"validate_beacon_blocks.go",
|
||||
"validate_blob.go",
|
||||
"validate_bls_to_execution_change.go",
|
||||
@@ -161,6 +160,7 @@ go_test(
|
||||
"rpc_beacon_blocks_by_root_test.go",
|
||||
"rpc_blob_sidecars_by_range_test.go",
|
||||
"rpc_blob_sidecars_by_root_test.go",
|
||||
"rpc_chunked_response_test.go",
|
||||
"rpc_goodbye_test.go",
|
||||
"rpc_handler_test.go",
|
||||
"rpc_metadata_test.go",
|
||||
@@ -177,7 +177,6 @@ go_test(
|
||||
"sync_test.go",
|
||||
"validate_aggregate_proof_test.go",
|
||||
"validate_attester_slashing_test.go",
|
||||
"validate_beacon_attestation_electra_test.go",
|
||||
"validate_beacon_attestation_test.go",
|
||||
"validate_beacon_blocks_test.go",
|
||||
"validate_blob_test.go",
|
||||
|
||||
@@ -1,20 +1,13 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
@@ -57,12 +50,11 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
|
||||
return nil, errors.Errorf("message of %T does not support marshaller interface", base)
|
||||
}
|
||||
// Handle different message types across forks.
|
||||
dt, err := extractValidDataTypeFromTopic(topic, fDigest[:], s.cfg.clock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if dt != nil {
|
||||
m = dt
|
||||
if topic == p2p.BlockSubnetTopicFormat {
|
||||
m, err = extractBlockDataType(fDigest[:], s.cfg.clock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := s.cfg.p2p.Encoding().DecodeGossip(msg.Data, m); err != nil {
|
||||
return nil, err
|
||||
@@ -71,7 +63,7 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
|
||||
}
|
||||
|
||||
// Replaces our fork digest with the formatter.
|
||||
func (*Service) replaceForkDigest(topic string) (string, error) {
|
||||
func (_ *Service) replaceForkDigest(topic string) (string, error) {
|
||||
subStrings := strings.Split(topic, "/")
|
||||
if len(subStrings) != 4 {
|
||||
return "", errInvalidTopic
|
||||
@@ -79,48 +71,3 @@ func (*Service) replaceForkDigest(topic string) (string, error) {
|
||||
subStrings[2] = "%x"
|
||||
return strings.Join(subStrings, "/"), nil
|
||||
}
|
||||
|
||||
func extractValidDataTypeFromTopic(topic string, digest []byte, clock *startup.Clock) (ssz.Unmarshaler, error) {
|
||||
switch topic {
|
||||
case p2p.BlockSubnetTopicFormat:
|
||||
return extractDataTypeFromTypeMap(types.BlockMap, digest, clock)
|
||||
case p2p.AttestationSubnetTopicFormat:
|
||||
return extractDataTypeFromTypeMap(types.AttestationMap, digest, clock)
|
||||
case p2p.AggregateAndProofSubnetTopicFormat:
|
||||
return extractDataTypeFromTypeMap(types.AggregateAttestationMap, digest, clock)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func extractDataTypeFromTypeMap[T any](typeMap map[[4]byte]func() (T, error), digest []byte, tor blockchain.TemporalOracle) (T, error) {
|
||||
var zero T
|
||||
|
||||
if len(digest) == 0 {
|
||||
f, ok := typeMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
if !ok {
|
||||
return zero, fmt.Errorf("no %T type exists for the genesis fork version", zero)
|
||||
}
|
||||
return f()
|
||||
}
|
||||
if len(digest) != forkDigestLength {
|
||||
return zero, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
|
||||
}
|
||||
vRoot := tor.GenesisValidatorsRoot()
|
||||
for k, f := range typeMap {
|
||||
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
|
||||
if err != nil {
|
||||
return zero, err
|
||||
}
|
||||
if rDigest == bytesutil.ToBytes4(digest) {
|
||||
return f()
|
||||
}
|
||||
}
|
||||
return zero, errors.Wrapf(
|
||||
ErrNoValidDigest,
|
||||
"could not extract %T data type, saw digest=%#x, genesis=%v, vr=%#x",
|
||||
zero,
|
||||
digest,
|
||||
tor.GenesisTime(),
|
||||
tor.GenesisValidatorsRoot(),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -11,20 +11,15 @@ import (
|
||||
"github.com/d4l3k/messagediff"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2ptesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
@@ -114,197 +109,3 @@ func TestService_decodePubsubMessage(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractDataType(t *testing.T) {
|
||||
// Precompute digests
|
||||
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
bellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
capellaDigest, err := signing.ComputeForkDigest(params.BeaconConfig().CapellaForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
denebDigest, err := signing.ComputeForkDigest(params.BeaconConfig().DenebForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
electraDigest, err := signing.ComputeForkDigest(params.BeaconConfig().ElectraForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
type args struct {
|
||||
digest []byte
|
||||
chain blockchain.ChainInfoFetcher
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantBlock interfaces.ReadOnlySignedBeaconBlock
|
||||
wantMd metadata.Metadata
|
||||
wantAtt ethpb.Att
|
||||
wantAggregate ethpb.SignedAggregateAttAndProof
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "no digest",
|
||||
args: args{
|
||||
digest: []byte{},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV0(ðpb.MetaDataV0{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: nil,
|
||||
wantMd: nil,
|
||||
wantAtt: nil,
|
||||
wantAggregate: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non existent digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01, 0x02, 0x03},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: nil,
|
||||
wantMd: nil,
|
||||
wantAtt: nil,
|
||||
wantAggregate: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "genesis fork version",
|
||||
args: args{
|
||||
digest: genDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "altair fork version",
|
||||
args: args{
|
||||
digest: altairDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockAltair{Block: ðpb.BeaconBlockAltair{Body: ðpb.BeaconBlockBodyAltair{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "bellatrix fork version",
|
||||
args: args{
|
||||
digest: bellatrixDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockBellatrix{Block: ðpb.BeaconBlockBellatrix{Body: ðpb.BeaconBlockBodyBellatrix{ExecutionPayload: &enginev1.ExecutionPayload{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "capella fork version",
|
||||
args: args{
|
||||
digest: capellaDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockCapella{Block: ðpb.BeaconBlockCapella{Body: ðpb.BeaconBlockBodyCapella{ExecutionPayload: &enginev1.ExecutionPayloadCapella{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "deneb fork version",
|
||||
args: args{
|
||||
digest: denebDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockDeneb{Block: ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{ExecutionPayload: &enginev1.ExecutionPayloadDeneb{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "electra fork version",
|
||||
args: args{
|
||||
digest: electraDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockElectra{Block: ðpb.BeaconBlockElectra{Body: ðpb.BeaconBlockBodyElectra{ExecutionPayload: &enginev1.ExecutionPayloadElectra{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.AttestationElectra{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProofElectra{},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotBlock, err := extractDataTypeFromTypeMap(types.BlockMap, tt.args.digest, tt.args.chain)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("block: error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotBlock, tt.wantBlock) {
|
||||
t.Errorf("block: got = %v, want %v", gotBlock, tt.wantBlock)
|
||||
}
|
||||
gotAtt, err := extractDataTypeFromTypeMap(types.AttestationMap, tt.args.digest, tt.args.chain)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("attestation: error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotAtt, tt.wantAtt) {
|
||||
t.Errorf("attestation: got = %v, want %v", gotAtt, tt.wantAtt)
|
||||
}
|
||||
gotAggregate, err := extractDataTypeFromTypeMap(types.AggregateAttestationMap, tt.args.digest, tt.args.chain)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("aggregate: error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotAggregate, tt.wantAggregate) {
|
||||
t.Errorf("aggregate: got = %v, want %v", gotAggregate, tt.wantAggregate)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,7 @@ func NewRegularSyncFuzz(opts ...Option) *Service {
|
||||
cancel: cancel,
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
}
|
||||
r.rateLimiter = newRateLimiter(r.cfg.p2p)
|
||||
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -88,13 +87,12 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
|
||||
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
|
||||
}
|
||||
|
||||
func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.SignedAggregateAttAndProof) {
|
||||
func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb.SignedAggregateAttestationAndProof) {
|
||||
for _, signedAtt := range attestations {
|
||||
aggregate := signedAtt.AggregateAttestationAndProof().AggregateVal()
|
||||
data := aggregate.GetData()
|
||||
att := signedAtt.Message
|
||||
// The pending attestations can arrive in both aggregated and unaggregated forms,
|
||||
// each from has distinct validation steps.
|
||||
if helpers.IsAggregated(aggregate) {
|
||||
if helpers.IsAggregated(att.Aggregate) {
|
||||
// Save the pending aggregated attestation to the pool if it passes the aggregated
|
||||
// validation steps.
|
||||
valRes, err := s.validateAggregatedAtt(ctx, signedAtt)
|
||||
@@ -103,11 +101,11 @@ func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.
|
||||
}
|
||||
aggValid := pubsub.ValidationAccept == valRes
|
||||
if s.validateBlockInAttestation(ctx, signedAtt) && aggValid {
|
||||
if err := s.cfg.attPool.SaveAggregatedAttestation(aggregate); err != nil {
|
||||
if err := s.cfg.attPool.SaveAggregatedAttestation(att.Aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not save aggregate attestation")
|
||||
continue
|
||||
}
|
||||
s.setAggregatorIndexEpochSeen(data.Target.Epoch, signedAtt.AggregateAttestationAndProof().GetAggregatorIndex())
|
||||
s.setAggregatorIndexEpochSeen(att.Aggregate.Data.Target.Epoch, att.AggregatorIndex)
|
||||
|
||||
// Broadcasting the signed attestation again once a node is able to process it.
|
||||
if err := s.cfg.p2p.Broadcast(ctx, signedAtt); err != nil {
|
||||
@@ -118,39 +116,39 @@ func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.
|
||||
// This is an important validation before retrieving attestation pre state to defend against
|
||||
// attestation's target intentionally reference checkpoint that's long ago.
|
||||
// Verify current finalized checkpoint is an ancestor of the block defined by the attestation's beacon block root.
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(att.Aggregate.Data.BeaconBlockRoot)) {
|
||||
log.WithError(blockchain.ErrNotDescendantOfFinalized).Debug("Could not verify finalized consistency")
|
||||
continue
|
||||
}
|
||||
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, aggregate); err != nil {
|
||||
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, att.Aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not verify FFG consistency")
|
||||
continue
|
||||
}
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, att.Aggregate.Data.Target)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not retrieve attestation prestate")
|
||||
continue
|
||||
}
|
||||
|
||||
valid, err := s.validateUnaggregatedAttWithState(ctx, aggregate, preState)
|
||||
valid, err := s.validateUnaggregatedAttWithState(ctx, att.Aggregate, preState)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Pending unaggregated attestation failed validation")
|
||||
continue
|
||||
}
|
||||
if valid == pubsub.ValidationAccept {
|
||||
if err := s.cfg.attPool.SaveUnaggregatedAttestation(aggregate); err != nil {
|
||||
if err := s.cfg.attPool.SaveUnaggregatedAttestation(att.Aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not save unaggregated attestation")
|
||||
continue
|
||||
}
|
||||
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, aggregate.GetAggregationBits())
|
||||
s.setSeenCommitteeIndicesSlot(att.Aggregate.Data.Slot, att.Aggregate.Data.CommitteeIndex, att.Aggregate.AggregationBits)
|
||||
|
||||
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(data.Slot))
|
||||
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(att.Aggregate.Data.Slot))
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not retrieve active validator count")
|
||||
continue
|
||||
}
|
||||
// Broadcasting the signed attestation again once a node is able to process it.
|
||||
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, aggregate), aggregate); err != nil {
|
||||
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, signedAtt.Message.Aggregate), signedAtt.Message.Aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not broadcast")
|
||||
}
|
||||
}
|
||||
@@ -162,8 +160,8 @@ func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.
|
||||
// root of the missing block. The value is the list of pending attestations
|
||||
// that voted for that block root. The caller of this function is responsible
|
||||
// for not sending repeated attestations to the pending queue.
|
||||
func (s *Service) savePendingAtt(att ethpb.SignedAggregateAttAndProof) {
|
||||
root := bytesutil.ToBytes32(att.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)
|
||||
func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof) {
|
||||
root := bytesutil.ToBytes32(att.Message.Aggregate.Data.BeaconBlockRoot)
|
||||
|
||||
s.pendingAttsLock.Lock()
|
||||
defer s.pendingAttsLock.Unlock()
|
||||
@@ -180,7 +178,7 @@ func (s *Service) savePendingAtt(att ethpb.SignedAggregateAttAndProof) {
|
||||
_, ok := s.blkRootToPendingAtts[root]
|
||||
if !ok {
|
||||
pendingAttCount.Inc()
|
||||
s.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{att}
|
||||
s.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{att}
|
||||
return
|
||||
}
|
||||
// Skip if the attestation from the same aggregator already exists in
|
||||
@@ -194,32 +192,20 @@ func (s *Service) savePendingAtt(att ethpb.SignedAggregateAttAndProof) {
|
||||
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], att)
|
||||
}
|
||||
|
||||
func attsAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
|
||||
if a.GetSignature() != nil {
|
||||
return b.GetSignature() != nil && a.AggregateAttestationAndProof().GetAggregatorIndex() == b.AggregateAttestationAndProof().GetAggregatorIndex()
|
||||
func attsAreEqual(a, b *ethpb.SignedAggregateAttestationAndProof) bool {
|
||||
if a.Signature != nil {
|
||||
return b.Signature != nil && a.Message.AggregatorIndex == b.Message.AggregatorIndex
|
||||
}
|
||||
if b.GetSignature() != nil {
|
||||
if b.Signature != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
aAggregate := a.AggregateAttestationAndProof().AggregateVal()
|
||||
bAggregate := b.AggregateAttestationAndProof().AggregateVal()
|
||||
aData := aAggregate.GetData()
|
||||
bData := bAggregate.GetData()
|
||||
|
||||
if aData.Slot != bData.Slot {
|
||||
if a.Message.Aggregate.Data.Slot != b.Message.Aggregate.Data.Slot {
|
||||
return false
|
||||
}
|
||||
|
||||
if a.Version() >= version.Electra {
|
||||
if !bytes.Equal(aAggregate.CommitteeBitsVal().Bytes(), bAggregate.CommitteeBitsVal().Bytes()) {
|
||||
return false
|
||||
}
|
||||
} else if aData.CommitteeIndex != bData.CommitteeIndex {
|
||||
if a.Message.Aggregate.Data.CommitteeIndex != b.Message.Aggregate.Data.CommitteeIndex {
|
||||
return false
|
||||
}
|
||||
|
||||
return bytes.Equal(aAggregate.GetAggregationBits(), bAggregate.GetAggregationBits())
|
||||
return bytes.Equal(a.Message.Aggregate.AggregationBits, b.Message.Aggregate.AggregationBits)
|
||||
}
|
||||
|
||||
// This validates the pending attestations in the queue are still valid.
|
||||
@@ -235,7 +221,7 @@ func (s *Service) validatePendingAtts(ctx context.Context, slot primitives.Slot)
|
||||
|
||||
for bRoot, atts := range s.blkRootToPendingAtts {
|
||||
for i := len(atts) - 1; i >= 0; i-- {
|
||||
if slot >= atts[i].AggregateAttestationAndProof().AggregateVal().GetData().Slot+params.BeaconConfig().SlotsPerEpoch {
|
||||
if slot >= atts[i].Message.Aggregate.Data.Slot+params.BeaconConfig().SlotsPerEpoch {
|
||||
// Remove the pending attestation from the list in place.
|
||||
atts = append(atts[:i], atts[i+1:]...)
|
||||
}
|
||||
|
||||
@@ -46,12 +46,12 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
|
||||
chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: ðpb.Checkpoint{}}
|
||||
r := &Service{
|
||||
cfg: &config{p2p: p1, beaconDB: db, chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot)},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
chainStarted: abool.New(),
|
||||
}
|
||||
|
||||
a := ðpb.AggregateAttestationAndProof{Aggregate: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: make([]byte, 32)}}}}
|
||||
r.blkRootToPendingAtts[[32]byte{'A'}] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: a}}
|
||||
r.blkRootToPendingAtts[[32]byte{'A'}] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
require.LogsContain(t, hook, "Requesting block by root")
|
||||
}
|
||||
@@ -124,7 +124,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
attPool: attestations.NewPool(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
seenUnAggregatedAttestationCache: lruwrpr.New(10),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
}
|
||||
@@ -134,7 +134,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root))
|
||||
|
||||
r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
r.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
atts, err := r.cfg.attPool.UnaggregatedAttestations()
|
||||
@@ -162,7 +162,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
attPool: attestations.NewPool(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
}
|
||||
|
||||
priv, err := bls.RandKey()
|
||||
@@ -182,7 +182,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
|
||||
util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b)
|
||||
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, r32))
|
||||
|
||||
r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
|
||||
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
assert.Equal(t, false, p1.BroadcastCalled.Load(), "Broadcasted bad aggregate")
|
||||
@@ -245,13 +245,13 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
|
||||
clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
|
||||
attPool: attestations.NewPool(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
seenUnAggregatedAttestationCache: lruwrpr.New(10),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
}
|
||||
go r.verifierRoutine()
|
||||
|
||||
r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
assert.Equal(t, true, p1.BroadcastCalled.Load(), "Could not broadcast the good aggregate")
|
||||
@@ -330,7 +330,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
attPool: attestations.NewPool(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
seenAggregatedAttestationCache: lruwrpr.New(10),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
}
|
||||
@@ -339,7 +339,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root))
|
||||
|
||||
r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
r.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save aggregated att")
|
||||
@@ -353,7 +353,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
|
||||
func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
|
||||
s := &Service{
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
}
|
||||
|
||||
// 100 Attestations per block root.
|
||||
@@ -401,7 +401,7 @@ func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
|
||||
|
||||
func TestValidatePendingAtts_NoDuplicatingAtts(t *testing.T) {
|
||||
s := &Service{
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
}
|
||||
|
||||
r1 := [32]byte{'A'}
|
||||
@@ -428,7 +428,7 @@ func TestValidatePendingAtts_NoDuplicatingAtts(t *testing.T) {
|
||||
|
||||
func TestSavePendingAtts_BeyondLimit(t *testing.T) {
|
||||
s := &Service{
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
}
|
||||
|
||||
for i := 0; i < pendingAttsLimit; i++ {
|
||||
@@ -457,71 +457,5 @@ func TestSavePendingAtts_BeyondLimit(t *testing.T) {
|
||||
|
||||
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r1]), "Saved pending atts")
|
||||
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r2]), "Saved pending atts")
|
||||
}
|
||||
|
||||
func Test_attsAreEqual_Committee(t *testing.T) {
|
||||
t.Run("Phase 0 equal", func(t *testing.T) {
|
||||
att1 := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 123}}}}
|
||||
att2 := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 123}}}}
|
||||
assert.Equal(t, true, attsAreEqual(att1, att2))
|
||||
})
|
||||
t.Run("Phase 0 not equal", func(t *testing.T) {
|
||||
att1 := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 123}}}}
|
||||
att2 := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 456}}}}
|
||||
assert.Equal(t, false, attsAreEqual(att1, att2))
|
||||
})
|
||||
t.Run("Electra equal", func(t *testing.T) {
|
||||
cb1 := primitives.NewAttestationCommitteeBits()
|
||||
cb1.SetBitAt(0, true)
|
||||
att1 := ðpb.SignedAggregateAttestationAndProofElectra{
|
||||
Message: ðpb.AggregateAttestationAndProofElectra{
|
||||
Aggregate: ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{},
|
||||
CommitteeBits: cb1,
|
||||
}}}
|
||||
cb2 := primitives.NewAttestationCommitteeBits()
|
||||
cb2.SetBitAt(0, true)
|
||||
att2 := ðpb.SignedAggregateAttestationAndProofElectra{
|
||||
Message: ðpb.AggregateAttestationAndProofElectra{
|
||||
Aggregate: ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{},
|
||||
CommitteeBits: cb2,
|
||||
}}}
|
||||
assert.Equal(t, true, attsAreEqual(att1, att2))
|
||||
})
|
||||
t.Run("Electra not equal", func(t *testing.T) {
|
||||
cb1 := primitives.NewAttestationCommitteeBits()
|
||||
cb1.SetBitAt(0, true)
|
||||
att1 := ðpb.SignedAggregateAttestationAndProofElectra{
|
||||
Message: ðpb.AggregateAttestationAndProofElectra{
|
||||
Aggregate: ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{},
|
||||
CommitteeBits: cb1,
|
||||
}}}
|
||||
cb2 := primitives.NewAttestationCommitteeBits()
|
||||
cb2.SetBitAt(1, true)
|
||||
att2 := ðpb.SignedAggregateAttestationAndProofElectra{
|
||||
Message: ðpb.AggregateAttestationAndProofElectra{
|
||||
Aggregate: ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{},
|
||||
CommitteeBits: cb2,
|
||||
}}}
|
||||
assert.Equal(t, false, attsAreEqual(att1, att2))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -4,12 +4,14 @@ import (
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/forks"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
@@ -105,7 +107,7 @@ func readFirstChunkedBlock(stream libp2pcore.Stream, tor blockchain.TemporalOrac
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blk, err := extractDataTypeFromTypeMap(types.BlockMap, rpcCtx, tor)
|
||||
blk, err := extractBlockDataType(rpcCtx, tor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -129,7 +131,7 @@ func readResponseChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blk, err := extractDataTypeFromTypeMap(types.BlockMap, rpcCtx, tor)
|
||||
blk, err := extractBlockDataType(rpcCtx, tor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -137,6 +139,30 @@ func readResponseChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle,
|
||||
return blk, err
|
||||
}
|
||||
|
||||
func extractBlockDataType(digest []byte, tor blockchain.TemporalOracle) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
if len(digest) == 0 {
|
||||
bFunc, ok := types.BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
if !ok {
|
||||
return nil, errors.New("no block type exists for the genesis fork version.")
|
||||
}
|
||||
return bFunc()
|
||||
}
|
||||
if len(digest) != forkDigestLength {
|
||||
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
|
||||
}
|
||||
vRoot := tor.GenesisValidatorsRoot()
|
||||
for k, blkFunc := range types.BlockMap {
|
||||
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rDigest == bytesutil.ToBytes4(digest) {
|
||||
return blkFunc()
|
||||
}
|
||||
}
|
||||
return nil, errors.Wrapf(ErrNoValidDigest, "could not extract block data type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot())
|
||||
}
|
||||
|
||||
// WriteBlobSidecarChunk writes blob chunk object to stream.
|
||||
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
|
||||
func WriteBlobSidecarChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, encoding encoder.NetworkEncoding, sidecar blocks.VerifiedROBlob) error {
|
||||
|
||||
121
beacon-chain/sync/rpc_chunked_response_test.go
Normal file
121
beacon-chain/sync/rpc_chunked_response_test.go
Normal file
@@ -0,0 +1,121 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestExtractBlockDataType(t *testing.T) {
|
||||
// Precompute digests
|
||||
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
bellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
type args struct {
|
||||
digest []byte
|
||||
chain blockchain.ChainInfoFetcher
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want interfaces.ReadOnlySignedBeaconBlock
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "no digest",
|
||||
args: args{
|
||||
digest: []byte{},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
|
||||
want: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non existent digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01, 0x02, 0x03},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "genesis fork version",
|
||||
args: args{
|
||||
digest: genDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "altair fork version",
|
||||
args: args{
|
||||
digest: altairDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockAltair{Block: ðpb.BeaconBlockAltair{Body: ðpb.BeaconBlockBodyAltair{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "bellatrix fork version",
|
||||
args: args{
|
||||
digest: bellatrixDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockBellatrix{Block: ðpb.BeaconBlockBellatrix{Body: ðpb.BeaconBlockBodyBellatrix{ExecutionPayload: &enginev1.ExecutionPayload{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := extractBlockDataType(tt.args.digest, tt.args.chain)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("extractBlockDataType() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("extractBlockDataType() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -7,9 +7,13 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/forks"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
|
||||
@@ -108,7 +112,7 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
msg, err := extractDataTypeFromTypeMap(types.MetaDataMap, rpcCtx[:], s.cfg.clock)
|
||||
msg, err := extractMetaDataType(rpcCtx[:], s.cfg.clock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -129,3 +133,27 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
|
||||
}
|
||||
return msg, nil
|
||||
}
|
||||
|
||||
func extractMetaDataType(digest []byte, tor blockchain.TemporalOracle) (metadata.Metadata, error) {
|
||||
if len(digest) == 0 {
|
||||
mdFunc, ok := types.MetaDataMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
if !ok {
|
||||
return nil, errors.New("no metadata type exists for the genesis fork version.")
|
||||
}
|
||||
return mdFunc(), nil
|
||||
}
|
||||
if len(digest) != forkDigestLength {
|
||||
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
|
||||
}
|
||||
vRoot := tor.GenesisValidatorsRoot()
|
||||
for k, mdFunc := range types.MetaDataMap {
|
||||
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rDigest == bytesutil.ToBytes4(digest) {
|
||||
return mdFunc(), nil
|
||||
}
|
||||
}
|
||||
return nil, errors.Wrapf(ErrNoValidDigest, "could not extract metadata type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot())
|
||||
}
|
||||
|
||||
@@ -2,13 +2,16 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
db "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
@@ -18,6 +21,7 @@ import (
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/equality"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
@@ -229,3 +233,80 @@ func TestMetadataRPCHandler_SendsMetadataAltair(t *testing.T) {
|
||||
t.Error("Peer is disconnected despite receiving a valid ping")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractMetaDataType(t *testing.T) {
|
||||
// Precompute digests
|
||||
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
type args struct {
|
||||
digest []byte
|
||||
clock blockchain.TemporalOracle
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want metadata.Metadata
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "no digest",
|
||||
args: args{
|
||||
digest: []byte{},
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01},
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non existent digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01, 0x02, 0x03},
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "genesis fork version",
|
||||
args: args{
|
||||
digest: genDigest[:],
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "altair fork version",
|
||||
args: args{
|
||||
digest: altairDigest[:],
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: wrapper.WrappedMetadataV1(&pb.MetaDataV1{}),
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := extractMetaDataType(tt.args.digest, tt.args.clock)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("extractMetaDataType() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("extractMetaDataType() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,7 +122,7 @@ type Service struct {
|
||||
cancel context.CancelFunc
|
||||
slotToPendingBlocks *gcache.Cache
|
||||
seenPendingBlocks map[[32]byte]bool
|
||||
blkRootToPendingAtts map[[32]byte][]ethpb.SignedAggregateAttAndProof
|
||||
blkRootToPendingAtts map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof
|
||||
subHandler *subTopicHandler
|
||||
pendingAttsLock sync.RWMutex
|
||||
pendingQueueLock sync.RWMutex
|
||||
@@ -171,7 +171,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
|
||||
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
|
||||
slotToPendingBlocks: c,
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
|
||||
@@ -13,21 +13,19 @@ import (
|
||||
// beaconAggregateProofSubscriber forwards the incoming validated aggregated attestation and proof to the
|
||||
// attestation pool for processing.
|
||||
func (s *Service) beaconAggregateProofSubscriber(_ context.Context, msg proto.Message) error {
|
||||
a, ok := msg.(ethpb.SignedAggregateAttAndProof)
|
||||
a, ok := msg.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
if !ok {
|
||||
return fmt.Errorf("message was not type ethpb.SignedAggregateAttAndProof, type=%T", msg)
|
||||
return fmt.Errorf("message was not type *ethpb.SignedAggregateAttestationAndProof, type=%T", msg)
|
||||
}
|
||||
|
||||
aggregate := a.AggregateAttestationAndProof().AggregateVal()
|
||||
|
||||
if aggregate == nil || aggregate.GetData() == nil {
|
||||
if a.Message.Aggregate == nil || a.Message.Aggregate.Data == nil {
|
||||
return errors.New("nil aggregate")
|
||||
}
|
||||
|
||||
// An unaggregated attestation can make it here. It’s valid, the aggregator it just itself, although it means poor performance for the subnet.
|
||||
if !helpers.IsAggregated(aggregate) {
|
||||
return s.cfg.attPool.SaveUnaggregatedAttestation(aggregate)
|
||||
if !helpers.IsAggregated(a.Message.Aggregate) {
|
||||
return s.cfg.attPool.SaveUnaggregatedAttestation(a.Message.Aggregate)
|
||||
}
|
||||
|
||||
return s.cfg.attPool.SaveAggregatedAttestation(aggregate)
|
||||
return s.cfg.attPool.SaveAggregatedAttestation(a.Message.Aggregate)
|
||||
}
|
||||
|
||||
@@ -15,21 +15,19 @@ import (
|
||||
)
|
||||
|
||||
func (s *Service) committeeIndexBeaconAttestationSubscriber(_ context.Context, msg proto.Message) error {
|
||||
a, ok := msg.(eth.Att)
|
||||
a, ok := msg.(*eth.Attestation)
|
||||
if !ok {
|
||||
return fmt.Errorf("message was not type eth.Att, type=%T", msg)
|
||||
return fmt.Errorf("message was not type *eth.Attestation, type=%T", msg)
|
||||
}
|
||||
|
||||
data := a.GetData()
|
||||
|
||||
if data == nil {
|
||||
if a.Data == nil {
|
||||
return errors.New("nil attestation")
|
||||
}
|
||||
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, a.GetAggregationBits())
|
||||
s.setSeenCommitteeIndicesSlot(a.Data.Slot, a.Data.CommitteeIndex, a.AggregationBits)
|
||||
|
||||
exists, err := s.cfg.attPool.HasAggregatedAttestation(a)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not determine if attestation pool has this attestation")
|
||||
return errors.Wrap(err, "Could not determine if attestation pool has this atttestation")
|
||||
}
|
||||
if exists {
|
||||
return nil
|
||||
@@ -38,11 +36,11 @@ func (s *Service) committeeIndexBeaconAttestationSubscriber(_ context.Context, m
|
||||
return s.cfg.attPool.SaveUnaggregatedAttestation(a)
|
||||
}
|
||||
|
||||
func (*Service) persistentSubnetIndices() []uint64 {
|
||||
func (_ *Service) persistentSubnetIndices() []uint64 {
|
||||
return cache.SubnetIDs.GetAllSubnets()
|
||||
}
|
||||
|
||||
func (*Service) aggregatorSubnetIndices(currentSlot primitives.Slot) []uint64 {
|
||||
func (_ *Service) aggregatorSubnetIndices(currentSlot primitives.Slot) []uint64 {
|
||||
endEpoch := slots.ToEpoch(currentSlot) + 1
|
||||
endSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(endEpoch))
|
||||
var commIds []uint64
|
||||
@@ -52,7 +50,7 @@ func (*Service) aggregatorSubnetIndices(currentSlot primitives.Slot) []uint64 {
|
||||
return slice.SetUint64(commIds)
|
||||
}
|
||||
|
||||
func (*Service) attesterSubnetIndices(currentSlot primitives.Slot) []uint64 {
|
||||
func (_ *Service) attesterSubnetIndices(currentSlot primitives.Slot) []uint64 {
|
||||
endEpoch := slots.ToEpoch(currentSlot) + 1
|
||||
endSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(endEpoch))
|
||||
var commIds []uint64
|
||||
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -48,48 +47,38 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
m, ok := raw.(ethpb.SignedAggregateAttAndProof)
|
||||
m, ok := raw.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
if !ok {
|
||||
return pubsub.ValidationReject, errors.Errorf("invalid message type: %T", raw)
|
||||
}
|
||||
if m.AggregateAttestationAndProof() == nil {
|
||||
if m.Message == nil {
|
||||
return pubsub.ValidationReject, errNilMessage
|
||||
}
|
||||
|
||||
aggregate := m.AggregateAttestationAndProof().AggregateVal()
|
||||
data := aggregate.GetData()
|
||||
|
||||
if err := helpers.ValidateNilAttestation(aggregate); err != nil {
|
||||
if err := helpers.ValidateNilAttestation(m.Message.Aggregate); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
// Do not process slot 0 aggregates.
|
||||
if data.Slot == 0 {
|
||||
if m.Message.Aggregate.Data.Slot == 0 {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// Broadcast the aggregated attestation on a feed to notify other services in the beacon node
|
||||
// of a received aggregated attestation.
|
||||
// TODO: this will be extended to Electra in a later PR
|
||||
if m.Version() == version.Phase0 {
|
||||
phase0Att, ok := m.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
if ok {
|
||||
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: operation.AggregatedAttReceived,
|
||||
Data: &operation.AggregatedAttReceivedData{
|
||||
Attestation: phase0Att.Message,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: operation.AggregatedAttReceived,
|
||||
Data: &operation.AggregatedAttReceivedData{
|
||||
Attestation: m.Message,
|
||||
},
|
||||
})
|
||||
|
||||
if err := helpers.ValidateSlotTargetEpoch(data); err != nil {
|
||||
if err := helpers.ValidateSlotTargetEpoch(m.Message.Aggregate.Data); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
|
||||
// processing tolerance.
|
||||
if err := helpers.ValidateAttestationTime(
|
||||
data.Slot,
|
||||
m.Message.Aggregate.Data.Slot,
|
||||
s.cfg.clock.GenesisTime(),
|
||||
earlyAttestationProcessingTolerance,
|
||||
); err != nil {
|
||||
@@ -98,19 +87,19 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
|
||||
}
|
||||
|
||||
// Verify this is the first aggregate received from the aggregator with index and slot.
|
||||
if s.hasSeenAggregatorIndexEpoch(data.Target.Epoch, m.AggregateAttestationAndProof().GetAggregatorIndex()) {
|
||||
if s.hasSeenAggregatorIndexEpoch(m.Message.Aggregate.Data.Target.Epoch, m.Message.AggregatorIndex) {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
// Check that the block being voted on isn't invalid.
|
||||
if s.hasBadBlock(bytesutil.ToBytes32(data.BeaconBlockRoot)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(data.Target.Root)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(data.Source.Root)) {
|
||||
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.BeaconBlockRoot)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Target.Root)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Source.Root)) {
|
||||
attBadBlockCount.Inc()
|
||||
return pubsub.ValidationReject, errors.New("bad block referenced in attestation data")
|
||||
}
|
||||
|
||||
// Verify aggregate attestation has not already been seen via aggregate gossip, within a block, or through the creation locally.
|
||||
seen, err := s.cfg.attPool.HasAggregatedAttestation(aggregate)
|
||||
seen, err := s.cfg.attPool.HasAggregatedAttestation(m.Message.Aggregate)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
@@ -127,7 +116,7 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
|
||||
return validationRes, err
|
||||
}
|
||||
|
||||
s.setAggregatorIndexEpochSeen(data.Target.Epoch, m.AggregateAttestationAndProof().GetAggregatorIndex())
|
||||
s.setAggregatorIndexEpochSeen(m.Message.Aggregate.Data.Target.Epoch, m.Message.AggregatorIndex)
|
||||
|
||||
msg.ValidatorData = m
|
||||
|
||||
@@ -136,75 +125,44 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
func (s *Service) validateAggregatedAtt(ctx context.Context, signed ethpb.SignedAggregateAttAndProof) (pubsub.ValidationResult, error) {
|
||||
func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.SignedAggregateAttestationAndProof) (pubsub.ValidationResult, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "sync.validateAggregatedAtt")
|
||||
defer span.End()
|
||||
|
||||
aggregateAndProof := signed.AggregateAttestationAndProof()
|
||||
aggregatorIndex := aggregateAndProof.GetAggregatorIndex()
|
||||
aggregate := aggregateAndProof.AggregateVal()
|
||||
data := aggregate.GetData()
|
||||
|
||||
// Verify attestation target root is consistent with the head root.
|
||||
// This verification is not in the spec, however we guard against it as it opens us up
|
||||
// to weird edge cases during verification. The attestation technically could be used to add value to a block,
|
||||
// but it's invalid in the spirit of the protocol. Here we choose safety over profit.
|
||||
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, aggregate); err != nil {
|
||||
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, signed.Message.Aggregate); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
attBadLmdConsistencyCount.Inc()
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// Verify current finalized checkpoint is an ancestor of the block defined by the attestation's beacon block root.
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(signed.Message.Aggregate.Data.BeaconBlockRoot)) {
|
||||
tracing.AnnotateError(span, blockchain.ErrNotDescendantOfFinalized)
|
||||
return pubsub.ValidationIgnore, blockchain.ErrNotDescendantOfFinalized
|
||||
}
|
||||
|
||||
bs, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
|
||||
bs, err := s.cfg.chain.AttestationTargetState(ctx, signed.Message.Aggregate.Data.Target)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// Verify validator index is within the beacon committee.
|
||||
result, err := s.validateIndexInCommittee(ctx, bs, aggregate, aggregatorIndex)
|
||||
result, err := s.validateIndexInCommittee(ctx, bs, signed.Message.Aggregate, signed.Message.AggregatorIndex)
|
||||
if result != pubsub.ValidationAccept {
|
||||
wrappedErr := errors.Wrapf(err, "could not validate index in committee")
|
||||
wrappedErr := errors.Wrapf(err, "Could not validate index in committee")
|
||||
tracing.AnnotateError(span, wrappedErr)
|
||||
return result, wrappedErr
|
||||
}
|
||||
|
||||
var committeeIndex primitives.CommitteeIndex
|
||||
if signed.Version() >= version.Electra {
|
||||
a, ok := aggregate.(*ethpb.AttestationElectra)
|
||||
// This will never fail in practice because we asserted the version
|
||||
if !ok {
|
||||
err := fmt.Errorf("aggregate attestation has wrong type (expected %T, got %T)", ðpb.AttestationElectra{}, aggregate)
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
committeeIndex, result, err = validateCommitteeIndexElectra(ctx, a)
|
||||
if result != pubsub.ValidationAccept {
|
||||
wrappedErr := errors.Wrapf(err, "could not validate committee index for Electra version")
|
||||
tracing.AnnotateError(span, wrappedErr)
|
||||
return result, wrappedErr
|
||||
}
|
||||
} else {
|
||||
committeeIndex = data.CommitteeIndex
|
||||
}
|
||||
|
||||
// Verify selection proof reflects to the right validator.
|
||||
selectionSigSet, err := validateSelectionIndex(
|
||||
ctx,
|
||||
bs,
|
||||
data.Slot,
|
||||
committeeIndex,
|
||||
aggregatorIndex,
|
||||
aggregateAndProof.GetSelectionProof(),
|
||||
)
|
||||
selectionSigSet, err := validateSelectionIndex(ctx, bs, signed.Message.Aggregate.Data, signed.Message.AggregatorIndex, signed.Message.SelectionProof)
|
||||
if err != nil {
|
||||
wrappedErr := errors.Wrapf(err, "could not validate selection for validator %d", aggregateAndProof.GetAggregatorIndex())
|
||||
wrappedErr := errors.Wrapf(err, "Could not validate selection for validator %d", signed.Message.AggregatorIndex)
|
||||
tracing.AnnotateError(span, wrappedErr)
|
||||
attBadSelectionProofCount.Inc()
|
||||
return pubsub.ValidationReject, wrappedErr
|
||||
@@ -214,13 +172,13 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed ethpb.Signed
|
||||
// We use batch verify here to save compute.
|
||||
aggregatorSigSet, err := aggSigSet(bs, signed)
|
||||
if err != nil {
|
||||
wrappedErr := errors.Wrapf(err, "could not get aggregator sig set %d", aggregatorIndex)
|
||||
wrappedErr := errors.Wrapf(err, "Could not get aggregator sig set %d", signed.Message.AggregatorIndex)
|
||||
tracing.AnnotateError(span, wrappedErr)
|
||||
return pubsub.ValidationIgnore, wrappedErr
|
||||
}
|
||||
attSigSet, err := blocks.AttestationSignatureBatch(ctx, bs, []ethpb.Att{aggregate})
|
||||
attSigSet, err := blocks.AttestationSignatureBatch(ctx, bs, []ethpb.Att{signed.Message.Aggregate})
|
||||
if err != nil {
|
||||
wrappedErr := errors.Wrapf(err, "could not verify aggregator signature %d", aggregatorIndex)
|
||||
wrappedErr := errors.Wrapf(err, "Could not verify aggregator signature %d", signed.Message.AggregatorIndex)
|
||||
tracing.AnnotateError(span, wrappedErr)
|
||||
return pubsub.ValidationIgnore, wrappedErr
|
||||
}
|
||||
@@ -230,9 +188,10 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed ethpb.Signed
|
||||
return s.validateWithBatchVerifier(ctx, "aggregate", set)
|
||||
}
|
||||
|
||||
func (s *Service) validateBlockInAttestation(ctx context.Context, satt ethpb.SignedAggregateAttAndProof) bool {
|
||||
func (s *Service) validateBlockInAttestation(ctx context.Context, satt *ethpb.SignedAggregateAttestationAndProof) bool {
|
||||
a := satt.Message
|
||||
// Verify the block being voted and the processed state is in beaconDB. The block should have passed validation if it's in the beaconDB.
|
||||
blockRoot := bytesutil.ToBytes32(satt.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)
|
||||
blockRoot := bytesutil.ToBytes32(a.Aggregate.Data.BeaconBlockRoot)
|
||||
if !s.hasBlockAndState(ctx, blockRoot) {
|
||||
// A node doesn't have the block, it'll request from peer while saving the pending attestation to a queue.
|
||||
s.savePendingAtt(satt)
|
||||
@@ -275,7 +234,7 @@ func (s *Service) validateIndexInCommittee(ctx context.Context, bs state.ReadOnl
|
||||
return result, err
|
||||
}
|
||||
|
||||
committee, result, err := s.validateBitLength(ctx, bs, a.GetData().Slot, a.GetData().CommitteeIndex, a.GetAggregationBits())
|
||||
committee, result, err := s.validateBitLength(ctx, a, bs)
|
||||
if result != pubsub.ValidationAccept {
|
||||
return result, err
|
||||
}
|
||||
@@ -303,15 +262,14 @@ func (s *Service) validateIndexInCommittee(ctx context.Context, bs state.ReadOnl
|
||||
func validateSelectionIndex(
|
||||
ctx context.Context,
|
||||
bs state.ReadOnlyBeaconState,
|
||||
slot primitives.Slot,
|
||||
committeeIndex primitives.CommitteeIndex,
|
||||
data *ethpb.AttestationData,
|
||||
validatorIndex primitives.ValidatorIndex,
|
||||
proof []byte,
|
||||
) (*bls.SignatureBatch, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "sync.validateSelectionIndex")
|
||||
defer span.End()
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, slot, committeeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, data.Slot, data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -320,11 +278,11 @@ func validateSelectionIndex(
|
||||
return nil, err
|
||||
}
|
||||
if !aggregator {
|
||||
return nil, fmt.Errorf("validator is not an aggregator for slot %d", slot)
|
||||
return nil, fmt.Errorf("validator is not an aggregator for slot %d", data.Slot)
|
||||
}
|
||||
|
||||
domain := params.BeaconConfig().DomainSelectionProof
|
||||
epoch := slots.ToEpoch(slot)
|
||||
epoch := slots.ToEpoch(data.Slot)
|
||||
|
||||
v, err := bs.ValidatorAtIndex(validatorIndex)
|
||||
if err != nil {
|
||||
@@ -339,7 +297,7 @@ func validateSelectionIndex(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sszUint := primitives.SSZUint64(slot)
|
||||
sszUint := primitives.SSZUint64(data.Slot)
|
||||
root, err := signing.ComputeSigningRoot(&sszUint, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -353,10 +311,8 @@ func validateSelectionIndex(
|
||||
}
|
||||
|
||||
// This returns aggregator signature set which can be used to batch verify.
|
||||
func aggSigSet(s state.ReadOnlyBeaconState, a ethpb.SignedAggregateAttAndProof) (*bls.SignatureBatch, error) {
|
||||
aggregateAndProof := a.AggregateAttestationAndProof()
|
||||
|
||||
v, err := s.ValidatorAtIndex(aggregateAndProof.GetAggregatorIndex())
|
||||
func aggSigSet(s state.ReadOnlyBeaconState, a *ethpb.SignedAggregateAttestationAndProof) (*bls.SignatureBatch, error) {
|
||||
v, err := s.ValidatorAtIndex(a.Message.AggregatorIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -365,17 +321,17 @@ func aggSigSet(s state.ReadOnlyBeaconState, a ethpb.SignedAggregateAttAndProof)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
epoch := slots.ToEpoch(aggregateAndProof.AggregateVal().GetData().Slot)
|
||||
epoch := slots.ToEpoch(a.Message.Aggregate.Data.Slot)
|
||||
d, err := signing.Domain(s.Fork(), epoch, params.BeaconConfig().DomainAggregateAndProof, s.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root, err := signing.ComputeSigningRoot(aggregateAndProof, d)
|
||||
root, err := signing.ComputeSigningRoot(a.Message, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &bls.SignatureBatch{
|
||||
Signatures: [][]byte{a.GetSignature()},
|
||||
Signatures: [][]byte{a.Signature},
|
||||
PublicKeys: []bls.PublicKey{publicKey},
|
||||
Messages: [][32]byte{root},
|
||||
Descriptions: []string{signing.AggregatorSignature},
|
||||
|
||||
@@ -117,7 +117,7 @@ func TestVerifySelection_NotAnAggregator(t *testing.T) {
|
||||
sig := privKeys[0].Sign([]byte{'A'})
|
||||
data := util.HydrateAttestationData(ðpb.AttestationData{})
|
||||
|
||||
_, err := validateSelectionIndex(ctx, beaconState, data.Slot, data.CommitteeIndex, 0, sig.Marshal())
|
||||
_, err := validateSelectionIndex(ctx, beaconState, data, 0, sig.Marshal())
|
||||
wanted := "validator is not an aggregator for slot"
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
}
|
||||
@@ -149,7 +149,7 @@ func TestValidateAggregateAndProof_NoBlock(t *testing.T) {
|
||||
attPool: attestations.NewPool(),
|
||||
chain: &mock.ChainService{},
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
seenAggregatedAttestationCache: c,
|
||||
}
|
||||
r.initCaches()
|
||||
@@ -302,7 +302,7 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) {
|
||||
attestationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
},
|
||||
seenAggregatedAttestationCache: lruwrpr.New(10),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
}
|
||||
r.initCaches()
|
||||
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
@@ -23,7 +22,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -57,18 +55,16 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
att, ok := m.(eth.Att)
|
||||
att, ok := m.(*eth.Attestation)
|
||||
if !ok {
|
||||
return pubsub.ValidationReject, errWrongMessage
|
||||
}
|
||||
|
||||
data := att.GetData()
|
||||
|
||||
if err := helpers.ValidateNilAttestation(att); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
// Do not process slot 0 attestations.
|
||||
if data.Slot == 0 {
|
||||
if att.Data.Slot == 0 {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
|
||||
@@ -82,36 +78,15 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
|
||||
// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
|
||||
// processing tolerance.
|
||||
if err := helpers.ValidateAttestationTime(data.Slot, s.cfg.clock.GenesisTime(),
|
||||
if err := helpers.ValidateAttestationTime(att.Data.Slot, s.cfg.clock.GenesisTime(),
|
||||
earlyAttestationProcessingTolerance); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
if err := helpers.ValidateSlotTargetEpoch(data); err != nil {
|
||||
if err := helpers.ValidateSlotTargetEpoch(att.Data); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
var validationRes pubsub.ValidationResult
|
||||
|
||||
var committeeIndex primitives.CommitteeIndex
|
||||
if att.Version() >= version.Electra {
|
||||
a, ok := att.(*eth.AttestationElectra)
|
||||
// This will never fail in practice because we asserted the version
|
||||
if !ok {
|
||||
err := fmt.Errorf("attestation has wrong type (expected %T, got %T)", ð.AttestationElectra{}, att)
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
committeeIndex, validationRes, err = validateCommitteeIndexElectra(ctx, a)
|
||||
if validationRes != pubsub.ValidationAccept {
|
||||
wrappedErr := errors.Wrapf(err, "could not validate committee index for Electra version")
|
||||
tracing.AnnotateError(span, wrappedErr)
|
||||
return validationRes, wrappedErr
|
||||
}
|
||||
} else {
|
||||
committeeIndex = data.CommitteeIndex
|
||||
}
|
||||
|
||||
if features.Get().EnableSlasher {
|
||||
// Feed the indexed attestation to slasher if enabled. This action
|
||||
// is done in the background to avoid adding more load to this critical code path.
|
||||
@@ -119,13 +94,13 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
// Using a different context to prevent timeouts as this operation can be expensive
|
||||
// and we want to avoid affecting the critical code path.
|
||||
ctx := context.TODO()
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, att.Data.Target)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve pre state")
|
||||
tracing.AnnotateError(span, err)
|
||||
return
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, data.Slot, committeeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attestation committee")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -142,41 +117,27 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
}
|
||||
|
||||
// Verify this the first attestation received for the participating validator for the slot.
|
||||
if s.hasSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, att.GetAggregationBits()) {
|
||||
if s.hasSeenCommitteeIndicesSlot(att.Data.Slot, att.Data.CommitteeIndex, att.AggregationBits) {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// Reject an attestation if it references an invalid block.
|
||||
if s.hasBadBlock(bytesutil.ToBytes32(data.BeaconBlockRoot)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(data.Target.Root)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(data.Source.Root)) {
|
||||
if s.hasBadBlock(bytesutil.ToBytes32(att.Data.BeaconBlockRoot)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(att.Data.Target.Root)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(att.Data.Source.Root)) {
|
||||
attBadBlockCount.Inc()
|
||||
return pubsub.ValidationReject, errors.New("attestation data references bad block root")
|
||||
}
|
||||
|
||||
// Verify the block being voted and the processed state is in beaconDB and the block has passed validation if it's in the beaconDB.
|
||||
blockRoot := bytesutil.ToBytes32(data.BeaconBlockRoot)
|
||||
blockRoot := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
|
||||
if !s.hasBlockAndState(ctx, blockRoot) {
|
||||
// A node doesn't have the block, it'll request from peer while saving the pending attestation to a queue.
|
||||
if att.Version() >= version.Electra {
|
||||
a, ok := att.(*eth.AttestationElectra)
|
||||
// This will never fail in practice because we asserted the version
|
||||
if !ok {
|
||||
return pubsub.ValidationIgnore, fmt.Errorf("attestation has wrong type (expected %T, got %T)", ð.AttestationElectra{}, att)
|
||||
}
|
||||
s.savePendingAtt(ð.SignedAggregateAttestationAndProofElectra{Message: ð.AggregateAttestationAndProofElectra{Aggregate: a}})
|
||||
} else {
|
||||
a, ok := att.(*eth.Attestation)
|
||||
// This will never fail in practice because we asserted the version
|
||||
if !ok {
|
||||
return pubsub.ValidationIgnore, fmt.Errorf("attestation has wrong type (expected %T, got %T)", ð.Attestation{}, att)
|
||||
}
|
||||
s.savePendingAtt(ð.SignedAggregateAttestationAndProof{Message: ð.AggregateAttestationAndProof{Aggregate: a}})
|
||||
}
|
||||
s.savePendingAtt(ð.SignedAggregateAttestationAndProof{Message: ð.AggregateAttestationAndProof{Aggregate: att}})
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(att.Data.BeaconBlockRoot)) {
|
||||
tracing.AnnotateError(span, blockchain.ErrNotDescendantOfFinalized)
|
||||
return pubsub.ValidationIgnore, blockchain.ErrNotDescendantOfFinalized
|
||||
}
|
||||
@@ -186,13 +147,13 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, att.Data.Target)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
validationRes, err = s.validateUnaggregatedAttTopic(ctx, att, preState, *msg.Topic)
|
||||
validationRes, err := s.validateUnaggregatedAttTopic(ctx, att, preState, *msg.Topic)
|
||||
if validationRes != pubsub.ValidationAccept {
|
||||
return validationRes, err
|
||||
}
|
||||
@@ -202,7 +163,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
return validationRes, err
|
||||
}
|
||||
|
||||
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, att.GetAggregationBits())
|
||||
s.setSeenCommitteeIndicesSlot(att.Data.Slot, att.Data.CommitteeIndex, att.AggregationBits)
|
||||
|
||||
msg.ValidatorData = att
|
||||
|
||||
@@ -250,7 +211,7 @@ func (s *Service) validateUnaggregatedAttWithState(ctx context.Context, a eth.At
|
||||
ctx, span := trace.StartSpan(ctx, "sync.validateUnaggregatedAttWithState")
|
||||
defer span.End()
|
||||
|
||||
committee, result, err := s.validateBitLength(ctx, bs, a.GetData().Slot, a.GetData().CommitteeIndex, a.GetAggregationBits())
|
||||
committee, result, err := s.validateBitLength(ctx, a, bs)
|
||||
if result != pubsub.ValidationAccept {
|
||||
return result, err
|
||||
}
|
||||
@@ -271,20 +232,14 @@ func (s *Service) validateUnaggregatedAttWithState(ctx context.Context, a eth.At
|
||||
return s.validateWithBatchVerifier(ctx, "attestation", set)
|
||||
}
|
||||
|
||||
func (s *Service) validateBitLength(
|
||||
ctx context.Context,
|
||||
bs state.ReadOnlyBeaconState,
|
||||
slot primitives.Slot,
|
||||
committeeIndex primitives.CommitteeIndex,
|
||||
aggregationBits bitfield.Bitlist,
|
||||
) ([]primitives.ValidatorIndex, pubsub.ValidationResult, error) {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, slot, committeeIndex)
|
||||
func (s *Service) validateBitLength(ctx context.Context, a eth.Att, bs state.ReadOnlyBeaconState) ([]primitives.ValidatorIndex, pubsub.ValidationResult, error) {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, a.GetData().Slot, a.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// Verify number of aggregation bits matches the committee size.
|
||||
if err := helpers.VerifyBitfieldLength(aggregationBits, uint64(len(committee))); err != nil {
|
||||
if err := helpers.VerifyBitfieldLength(a.GetAggregationBits(), uint64(len(committee))); err != nil {
|
||||
return nil, pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
func validateCommitteeIndexElectra(ctx context.Context, a *ethpb.AttestationElectra) (primitives.CommitteeIndex, pubsub.ValidationResult, error) {
|
||||
_, span := trace.StartSpan(ctx, "sync.validateCommitteeIndexElectra")
|
||||
defer span.End()
|
||||
|
||||
ci := a.Data.CommitteeIndex
|
||||
if ci != 0 {
|
||||
return 0, pubsub.ValidationReject, fmt.Errorf("committee index must be 0 but was %d", ci)
|
||||
}
|
||||
committeeIndices := helpers.CommitteeIndices(a.CommitteeBits)
|
||||
if len(committeeIndices) != 1 {
|
||||
return 0, pubsub.ValidationReject, fmt.Errorf("exactly 1 committee index must be set but %d were set", len(committeeIndices))
|
||||
}
|
||||
return committeeIndices[0], pubsub.ValidationAccept, nil
|
||||
}
|
||||
@@ -1,46 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func Test_validateCommitteeIndexElectra(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("valid", func(t *testing.T) {
|
||||
cb := primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(1, true)
|
||||
ci, res, err := validateCommitteeIndexElectra(ctx, ðpb.AttestationElectra{Data: ðpb.AttestationData{}, CommitteeBits: cb})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, pubsub.ValidationAccept, res)
|
||||
assert.Equal(t, primitives.CommitteeIndex(1), ci)
|
||||
})
|
||||
t.Run("non-zero data committee index", func(t *testing.T) {
|
||||
cb := primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(1, true)
|
||||
_, res, err := validateCommitteeIndexElectra(ctx, ðpb.AttestationElectra{Data: ðpb.AttestationData{CommitteeIndex: 1}, CommitteeBits: cb})
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, pubsub.ValidationReject, res)
|
||||
})
|
||||
t.Run("no committee bits set", func(t *testing.T) {
|
||||
cb := primitives.NewAttestationCommitteeBits()
|
||||
_, res, err := validateCommitteeIndexElectra(ctx, ðpb.AttestationElectra{Data: ðpb.AttestationData{}, CommitteeBits: cb})
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, pubsub.ValidationReject, res)
|
||||
})
|
||||
t.Run("more than 1 committee bit set", func(t *testing.T) {
|
||||
cb := primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(0, true)
|
||||
cb.SetBitAt(1, true)
|
||||
_, res, err := validateCommitteeIndexElectra(ctx, ðpb.AttestationElectra{Data: ðpb.AttestationData{}, CommitteeBits: cb})
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, pubsub.ValidationReject, res)
|
||||
})
|
||||
}
|
||||
@@ -49,7 +49,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
attestationNotifier: (&mockChain.ChainService{}).OperationNotifier(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
seenUnAggregatedAttestationCache: lruwrpr.New(10),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
}
|
||||
@@ -290,7 +290,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
|
||||
m.Message.Topic = nil
|
||||
}
|
||||
|
||||
res, err := s.validateCommitteeIndexBeaconAttestation(ctx, "", m)
|
||||
res, err := s.validateCommitteeIndexBeaconAttestation(ctx, "" /*peerID*/, m)
|
||||
received := res == pubsub.ValidationAccept
|
||||
if received != tt.want {
|
||||
t.Fatalf("Did not received wanted validation. Got %v, wanted %v", !tt.want, tt.want)
|
||||
|
||||
@@ -9,7 +9,6 @@ go_library(
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"//cmd/prysmctl/checkpointsync:go_default_library",
|
||||
"//cmd/prysmctl/codegen:go_default_library",
|
||||
"//cmd/prysmctl/db:go_default_library",
|
||||
"//cmd/prysmctl/p2p:go_default_library",
|
||||
"//cmd/prysmctl/testnet:go_default_library",
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["cmd.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/codegen",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@com_github_offchainlabs_methodical_ssz//cmd/ssz/commands:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,14 +0,0 @@
|
||||
package codegen
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/methodical-ssz/cmd/ssz/commands"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var Commands = []*cli.Command{
|
||||
{
|
||||
Name: "ssz",
|
||||
Usage: "ssz code generation utilities",
|
||||
Subcommands: commands.All,
|
||||
},
|
||||
}
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/checkpointsync"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/codegen"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/testnet"
|
||||
@@ -33,5 +32,4 @@ func init() {
|
||||
prysmctlCommands = append(prysmctlCommands, testnet.Commands...)
|
||||
prysmctlCommands = append(prysmctlCommands, weaksubjectivity.Commands...)
|
||||
prysmctlCommands = append(prysmctlCommands, validator.Commands...)
|
||||
prysmctlCommands = append(prysmctlCommands, codegen.Commands...)
|
||||
}
|
||||
|
||||
48
deps.bzl
48
deps.bzl
@@ -678,12 +678,6 @@ def prysm_deps():
|
||||
sum = "h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=",
|
||||
version = "v1.5.5",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_dave_jennifer",
|
||||
importpath = "github.com/dave/jennifer",
|
||||
sum = "h1:uRbSBH9UTS64yXbh4FrMHfgfY762RD+C7bUPKODpSJE=",
|
||||
version = "v1.7.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_davecgh_go_spew",
|
||||
importpath = "github.com/davecgh/go-spew",
|
||||
@@ -2592,12 +2586,6 @@ def prysm_deps():
|
||||
sum = "h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=",
|
||||
version = "v0.0.0-20170623195520-56545f4a5d46",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_offchainlabs_methodical_ssz",
|
||||
importpath = "github.com/OffchainLabs/methodical-ssz",
|
||||
sum = "h1:56W3xtZyeLcbKjt5staxK/jMdE6ql69rPhbb/XmZPhA=",
|
||||
version = "v0.0.0-20240712201410-cd5a2975775c",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_oklog_oklog",
|
||||
importpath = "github.com/oklog/oklog",
|
||||
@@ -2985,8 +2973,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_prysmaticlabs_fastssz",
|
||||
importpath = "github.com/prysmaticlabs/fastssz",
|
||||
sum = "h1:0LZAwwHnsZFfXm4IK4rzFV4N5IVSKZKLmuBMA4kAlFk=",
|
||||
version = "v0.0.0-20240620202422-a981b8ef89d3",
|
||||
sum = "h1:c3p3UzV4vFA7xaCDphnDWOjpxcadrQ26l5b+ypsvyxo=",
|
||||
version = "v0.0.0-20221107182844-78142813af44",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_prysmaticlabs_go_bitfield",
|
||||
@@ -4810,8 +4798,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "org_golang_x_crypto",
|
||||
importpath = "golang.org/x/crypto",
|
||||
sum = "h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=",
|
||||
version = "v0.25.0",
|
||||
sum = "h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=",
|
||||
version = "v0.23.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_exp",
|
||||
@@ -4846,14 +4834,14 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "org_golang_x_mod",
|
||||
importpath = "golang.org/x/mod",
|
||||
sum = "h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=",
|
||||
version = "v0.19.0",
|
||||
sum = "h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=",
|
||||
version = "v0.17.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_net",
|
||||
importpath = "golang.org/x/net",
|
||||
sum = "h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=",
|
||||
version = "v0.27.0",
|
||||
sum = "h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=",
|
||||
version = "v0.25.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_oauth2",
|
||||
@@ -4876,26 +4864,26 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "org_golang_x_sys",
|
||||
importpath = "golang.org/x/sys",
|
||||
sum = "h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=",
|
||||
version = "v0.22.0",
|
||||
sum = "h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=",
|
||||
version = "v0.20.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_telemetry",
|
||||
importpath = "golang.org/x/telemetry",
|
||||
sum = "h1:zf5N6UOrA487eEFacMePxjXAJctxKmyjKUsjA11Uzuk=",
|
||||
version = "v0.0.0-20240521205824-bda55230c457",
|
||||
sum = "h1:IRJeR9r1pYWsHKTRe/IInb7lYvbBVIqOgsX/u0mbOWY=",
|
||||
version = "v0.0.0-20240228155512-f48c80bd79b2",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_term",
|
||||
importpath = "golang.org/x/term",
|
||||
sum = "h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=",
|
||||
version = "v0.22.0",
|
||||
sum = "h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=",
|
||||
version = "v0.20.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_text",
|
||||
importpath = "golang.org/x/text",
|
||||
sum = "h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=",
|
||||
version = "v0.16.0",
|
||||
sum = "h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=",
|
||||
version = "v0.15.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_time",
|
||||
@@ -4906,8 +4894,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "org_golang_x_tools",
|
||||
importpath = "golang.org/x/tools",
|
||||
sum = "h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=",
|
||||
version = "v0.23.0",
|
||||
sum = "h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=",
|
||||
version = "v0.21.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_xerrors",
|
||||
|
||||
@@ -7,7 +7,6 @@ go_library(
|
||||
"helpers.go",
|
||||
"htrutils.go",
|
||||
"merkleize.go",
|
||||
"slice_root.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/encoding/ssz",
|
||||
visibility = ["//visibility:public"],
|
||||
|
||||
@@ -144,13 +144,51 @@ func WithdrawalSliceRoot(withdrawals []*enginev1.Withdrawal, limit uint64) ([32]
|
||||
// DepositRequestsSliceRoot computes the HTR of a slice of deposit receipts.
|
||||
// The limit parameter is used as input to the bitwise merkleization algorithm.
|
||||
func DepositRequestsSliceRoot(depositRequests []*enginev1.DepositRequest, limit uint64) ([32]byte, error) {
|
||||
return SliceRoot(depositRequests, limit)
|
||||
roots := make([][32]byte, len(depositRequests))
|
||||
for i := 0; i < len(depositRequests); i++ {
|
||||
r, err := depositRequests[i].HashTreeRoot()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
roots[i] = r
|
||||
}
|
||||
|
||||
bytesRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), limit)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not compute merkleization")
|
||||
}
|
||||
bytesRootBuf := new(bytes.Buffer)
|
||||
if err := binary.Write(bytesRootBuf, binary.LittleEndian, uint64(len(depositRequests))); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not marshal length")
|
||||
}
|
||||
bytesRootBufRoot := make([]byte, 32)
|
||||
copy(bytesRootBufRoot, bytesRootBuf.Bytes())
|
||||
return MixInLength(bytesRoot, bytesRootBufRoot), nil
|
||||
}
|
||||
|
||||
// WithdrawalRequestsSliceRoot computes the HTR of a slice of withdrawal requests from the EL.
|
||||
// The limit parameter is used as input to the bitwise merkleization algorithm.
|
||||
func WithdrawalRequestsSliceRoot(withdrawalRequests []*enginev1.WithdrawalRequest, limit uint64) ([32]byte, error) {
|
||||
return SliceRoot(withdrawalRequests, limit)
|
||||
roots := make([][32]byte, len(withdrawalRequests))
|
||||
for i := 0; i < len(withdrawalRequests); i++ {
|
||||
r, err := withdrawalRequests[i].HashTreeRoot()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
roots[i] = r
|
||||
}
|
||||
|
||||
bytesRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), limit)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not compute merkleization")
|
||||
}
|
||||
bytesRootBuf := new(bytes.Buffer)
|
||||
if err := binary.Write(bytesRootBuf, binary.LittleEndian, uint64(len(withdrawalRequests))); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not marshal length")
|
||||
}
|
||||
bytesRootBufRoot := make([]byte, 32)
|
||||
copy(bytesRootBufRoot, bytesRootBuf.Bytes())
|
||||
return MixInLength(bytesRoot, bytesRootBufRoot), nil
|
||||
}
|
||||
|
||||
// ByteSliceRoot is a helper func to merkleize an arbitrary List[Byte, N]
|
||||
|
||||
20
go.mod
20
go.mod
@@ -8,7 +8,6 @@ require (
|
||||
contrib.go.opencensus.io/exporter/jaeger v0.2.1
|
||||
github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240209103030-ec53fa766bf8
|
||||
github.com/MariusVanDerWijden/tx-fuzz v1.3.3-0.20240227085032-f70dd7c85c97
|
||||
github.com/OffchainLabs/methodical-ssz v0.0.0-20240712201410-cd5a2975775c
|
||||
github.com/aristanetworks/goarista v0.0.0-20200805130819-fd197cf57d96
|
||||
github.com/bazelbuild/rules_go v0.23.2
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2
|
||||
@@ -66,7 +65,7 @@ require (
|
||||
github.com/prometheus/client_golang v1.19.1
|
||||
github.com/prometheus/client_model v0.6.1
|
||||
github.com/prometheus/prom2json v1.3.0
|
||||
github.com/prysmaticlabs/fastssz v0.0.0-20240620202422-a981b8ef89d3
|
||||
github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44
|
||||
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e
|
||||
github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c
|
||||
github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20230228205207-28762a7b9294
|
||||
@@ -89,11 +88,11 @@ require (
|
||||
go.opencensus.io v0.24.0
|
||||
go.uber.org/automaxprocs v1.5.2
|
||||
go.uber.org/mock v0.4.0
|
||||
golang.org/x/crypto v0.25.0
|
||||
golang.org/x/crypto v0.23.0
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
|
||||
golang.org/x/mod v0.19.0
|
||||
golang.org/x/mod v0.17.0
|
||||
golang.org/x/sync v0.7.0
|
||||
golang.org/x/tools v0.23.0
|
||||
golang.org/x/tools v0.21.0
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
|
||||
google.golang.org/grpc v1.56.3
|
||||
google.golang.org/protobuf v1.34.1
|
||||
@@ -126,7 +125,6 @@ require (
|
||||
github.com/containerd/cgroups v1.1.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
|
||||
github.com/dave/jennifer v1.7.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
|
||||
github.com/deckarep/golang-set/v2 v2.5.0 // indirect
|
||||
@@ -252,10 +250,10 @@ require (
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 // indirect
|
||||
golang.org/x/net v0.27.0 // indirect
|
||||
golang.org/x/net v0.25.0 // indirect
|
||||
golang.org/x/oauth2 v0.16.0 // indirect
|
||||
golang.org/x/term v0.22.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
golang.org/x/term v0.20.0 // indirect
|
||||
golang.org/x/text v0.15.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
|
||||
@@ -275,7 +273,7 @@ require (
|
||||
github.com/go-playground/validator/v10 v10.13.0
|
||||
github.com/peterh/liner v1.2.0 // indirect
|
||||
github.com/prysmaticlabs/gohashtree v0.0.4-beta
|
||||
golang.org/x/sys v0.22.0 // indirect
|
||||
golang.org/x/sys v0.20.0 // indirect
|
||||
google.golang.org/api v0.44.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
k8s.io/klog/v2 v2.80.0 // indirect
|
||||
@@ -286,5 +284,3 @@ replace github.com/json-iterator/go => github.com/prestonvanloon/go v1.1.7-0.201
|
||||
|
||||
// See https://github.com/prysmaticlabs/grpc-gateway/issues/2
|
||||
replace github.com/grpc-ecosystem/grpc-gateway/v2 => github.com/prysmaticlabs/grpc-gateway/v2 v2.3.1-0.20230315201114-09284ba20446
|
||||
|
||||
//replace github.com/OffchainLabs/methodical-ssz => /Users/kasey/src/OffchainLabs/methodical-ssz
|
||||
|
||||
36
go.sum
36
go.sum
@@ -73,8 +73,6 @@ github.com/MariusVanDerWijden/tx-fuzz v1.3.3-0.20240227085032-f70dd7c85c97/go.mo
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/OffchainLabs/methodical-ssz v0.0.0-20240712201410-cd5a2975775c h1:56W3xtZyeLcbKjt5staxK/jMdE6ql69rPhbb/XmZPhA=
|
||||
github.com/OffchainLabs/methodical-ssz v0.0.0-20240712201410-cd5a2975775c/go.mod h1:Ndc6p2Xsj0fV3Mx4Ufv32RZ4K9SvWhzhOxtMuTXDU1I=
|
||||
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
@@ -208,8 +206,6 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
|
||||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||
github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U=
|
||||
github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkEQxENCrlLo=
|
||||
github.com/dave/jennifer v1.7.0 h1:uRbSBH9UTS64yXbh4FrMHfgfY762RD+C7bUPKODpSJE=
|
||||
github.com/dave/jennifer v1.7.0/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -970,8 +966,8 @@ github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3c
|
||||
github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y=
|
||||
github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/prysmaticlabs/fastssz v0.0.0-20240620202422-a981b8ef89d3 h1:0LZAwwHnsZFfXm4IK4rzFV4N5IVSKZKLmuBMA4kAlFk=
|
||||
github.com/prysmaticlabs/fastssz v0.0.0-20240620202422-a981b8ef89d3/go.mod h1:h2OlIZD/M6wFvV3YMZbW16lFgh3Rsye00G44J2cwLyU=
|
||||
github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44 h1:c3p3UzV4vFA7xaCDphnDWOjpxcadrQ26l5b+ypsvyxo=
|
||||
github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44/go.mod h1:MA5zShstUwCQaE9faGHgCGvEWUbG87p4SAXINhmCkvg=
|
||||
github.com/prysmaticlabs/go-bitfield v0.0.0-20210108222456-8e92c3709aa0/go.mod h1:hCwmef+4qXWjv0jLDbQdWnL0Ol7cS7/lCSS26WR+u6s=
|
||||
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e h1:ATgOe+abbzfx9kCPeXIW4fiWyDdxlwHw07j8UGhdTd4=
|
||||
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4=
|
||||
@@ -1242,8 +1238,8 @@ golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98y
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
|
||||
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
|
||||
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@@ -1286,8 +1282,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
|
||||
golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1348,8 +1344,8 @@ golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
|
||||
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/oauth2 v0.0.0-20170912212905-13449ad91cb2/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
@@ -1485,8 +1481,8 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -1498,8 +1494,8 @@ golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
|
||||
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
|
||||
golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1515,8 +1511,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.0.0-20170424234030-8be79e1e0910/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -1595,8 +1591,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
|
||||
golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
|
||||
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
|
||||
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
@@ -4,8 +4,7 @@
|
||||
# Script to copy ssz.go files from bazel build folder to appropriate location.
|
||||
# Bazel builds to bazel-bin/... folder, script copies them back to original folder where target is.
|
||||
|
||||
#bazel query 'kind(ssz_gen_marshal, //proto/...)' | xargs bazel build $@
|
||||
bazel query 'kind(ssz_methodical, //proto/...) union kind(ssz_gen_marshal, //proto/...)' | xargs bazel build $@
|
||||
bazel query 'kind(ssz_gen_marshal, //proto/...)' | xargs bazel build $@
|
||||
|
||||
# Get locations of proto ssz.go files.
|
||||
file_list=()
|
||||
|
||||
@@ -152,7 +152,7 @@
|
||||
"external/.*": "Third party code",
|
||||
"rules_go_work-.*": "Third party code",
|
||||
".*\\.pb.*.go": "Generated code is ok",
|
||||
".*\\.ssz\\.go": "Generated code is ok"
|
||||
".*generated\\.ssz\\.go": "Generated code is ok"
|
||||
}
|
||||
},
|
||||
"properpermissions": {
|
||||
@@ -180,7 +180,7 @@
|
||||
"external/.*": "Third party code",
|
||||
"rules_go_work-.*": "Third party code",
|
||||
".*\\.pb.*.go": "Generated code is ok",
|
||||
".*\\.ssz\\.go": "Generated code is ok",
|
||||
".*generated\\.ssz\\.go": "Generated code is ok",
|
||||
".*_test\\.go": "Tests are ok (for now)",
|
||||
"tools/analyzers/ineffassign/ineffassign\\.go": "3rd party code with a massive switch statement"
|
||||
}
|
||||
|
||||
@@ -31,7 +31,6 @@ proto_library(
|
||||
ssz_gen_marshal(
|
||||
name = "ssz_generated_files",
|
||||
go_proto = ":go_proto",
|
||||
out = "engine.ssz.go",
|
||||
includes = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
],
|
||||
|
||||
@@ -190,7 +190,7 @@ func (e *ExecutionPayload) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o10 != 508 {
|
||||
if o10 < 508 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -336,7 +336,11 @@ func (e *ExecutionPayload) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
hh.PutBytes(e.ExtraData)
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (11) 'BaseFeePerGas'
|
||||
@@ -370,13 +374,25 @@ func (e *ExecutionPayload) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
hh.AppendBytes32(elem)
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (1073741824+31)/32)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
|
||||
}
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 1048576)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 1048576)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 1048576)
|
||||
}
|
||||
}
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -578,7 +594,7 @@ func (e *ExecutionPayloadCapella) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o10 != 512 {
|
||||
if o10 < 512 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -750,7 +766,11 @@ func (e *ExecutionPayloadCapella) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
hh.PutBytes(e.ExtraData)
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (11) 'BaseFeePerGas'
|
||||
@@ -784,10 +804,18 @@ func (e *ExecutionPayloadCapella) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
hh.AppendBytes32(elem)
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (1073741824+31)/32)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
|
||||
}
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 1048576)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 1048576)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 1048576)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (14) 'Withdrawals'
|
||||
@@ -803,10 +831,18 @@ func (e *ExecutionPayloadCapella) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
}
|
||||
}
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1014,7 +1050,7 @@ func (e *ExecutionPayloadDeneb) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o10 != 528 {
|
||||
if o10 < 528 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -1192,7 +1228,11 @@ func (e *ExecutionPayloadDeneb) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
hh.PutBytes(e.ExtraData)
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (11) 'BaseFeePerGas'
|
||||
@@ -1226,10 +1266,18 @@ func (e *ExecutionPayloadDeneb) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
hh.AppendBytes32(elem)
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (1073741824+31)/32)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
|
||||
}
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 1048576)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 1048576)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 1048576)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (14) 'Withdrawals'
|
||||
@@ -1245,7 +1293,11 @@ func (e *ExecutionPayloadDeneb) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (15) 'BlobGasUsed'
|
||||
@@ -1254,7 +1306,11 @@ func (e *ExecutionPayloadDeneb) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
// Field (16) 'ExcessBlobGas'
|
||||
hh.PutUint64(e.ExcessBlobGas)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1492,7 +1548,7 @@ func (e *ExecutionPayloadElectra) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o10 != 536 {
|
||||
if o10 < 536 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -1722,7 +1778,11 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
hh.PutBytes(e.ExtraData)
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (11) 'BaseFeePerGas'
|
||||
@@ -1756,10 +1816,18 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
hh.AppendBytes32(elem)
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (1073741824+31)/32)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
|
||||
}
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 1048576)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 1048576)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 1048576)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (14) 'Withdrawals'
|
||||
@@ -1775,7 +1843,11 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (15) 'BlobGasUsed'
|
||||
@@ -1797,7 +1869,11 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 8192)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 8192)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 8192)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (18) 'WithdrawalRequests'
|
||||
@@ -1813,10 +1889,18 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
}
|
||||
}
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1983,7 +2067,7 @@ func (e *ExecutionPayloadHeader) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o10 != 536 {
|
||||
if o10 < 536 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -2101,7 +2185,11 @@ func (e *ExecutionPayloadHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
hh.PutBytes(e.ExtraData)
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (11) 'BaseFeePerGas'
|
||||
@@ -2125,7 +2213,11 @@ func (e *ExecutionPayloadHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
hh.PutBytes(e.TransactionsRoot)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -2299,7 +2391,7 @@ func (e *ExecutionPayloadHeaderCapella) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o10 != 568 {
|
||||
if o10 < 568 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -2423,7 +2515,11 @@ func (e *ExecutionPayloadHeaderCapella) HashTreeRootWith(hh *ssz.Hasher) (err er
|
||||
return
|
||||
}
|
||||
hh.PutBytes(e.ExtraData)
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (11) 'BaseFeePerGas'
|
||||
@@ -2454,7 +2550,11 @@ func (e *ExecutionPayloadHeaderCapella) HashTreeRootWith(hh *ssz.Hasher) (err er
|
||||
}
|
||||
hh.PutBytes(e.WithdrawalsRoot)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -2634,7 +2734,7 @@ func (e *ExecutionPayloadHeaderDeneb) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o10 != 584 {
|
||||
if o10 < 584 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -2764,7 +2864,11 @@ func (e *ExecutionPayloadHeaderDeneb) HashTreeRootWith(hh *ssz.Hasher) (err erro
|
||||
return
|
||||
}
|
||||
hh.PutBytes(e.ExtraData)
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (11) 'BaseFeePerGas'
|
||||
@@ -2801,7 +2905,11 @@ func (e *ExecutionPayloadHeaderDeneb) HashTreeRootWith(hh *ssz.Hasher) (err erro
|
||||
// Field (16) 'ExcessBlobGas'
|
||||
hh.PutUint64(e.ExcessBlobGas)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -2995,7 +3103,7 @@ func (e *ExecutionPayloadHeaderElectra) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o10 != 648 {
|
||||
if o10 < 648 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -3137,7 +3245,11 @@ func (e *ExecutionPayloadHeaderElectra) HashTreeRootWith(hh *ssz.Hasher) (err er
|
||||
return
|
||||
}
|
||||
hh.PutBytes(e.ExtraData)
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (11) 'BaseFeePerGas'
|
||||
@@ -3188,7 +3300,11 @@ func (e *ExecutionPayloadHeaderElectra) HashTreeRootWith(hh *ssz.Hasher) (err er
|
||||
}
|
||||
hh.PutBytes(e.WithdrawalRequestsRoot)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -3277,7 +3393,11 @@ func (w *Withdrawal) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
// Field (3) 'Amount'
|
||||
hh.PutUint64(w.Amount)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -3361,7 +3481,7 @@ func (b *BlobsBundle) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o0 != 12 {
|
||||
if o0 < 12 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -3466,7 +3586,11 @@ func (b *BlobsBundle) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
|
||||
numItems := uint64(len(b.KzgCommitments))
|
||||
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, numItems, 4096)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (1) 'Proofs'
|
||||
@@ -3485,7 +3609,11 @@ func (b *BlobsBundle) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
|
||||
numItems := uint64(len(b.Proofs))
|
||||
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, numItems, 4096)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (2) 'Blobs'
|
||||
@@ -3504,10 +3632,18 @@ func (b *BlobsBundle) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
|
||||
numItems := uint64(len(b.Blobs))
|
||||
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, numItems, 4096)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
|
||||
}
|
||||
}
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -3598,7 +3734,11 @@ func (w *WithdrawalRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
// Field (2) 'Amount'
|
||||
hh.PutUint64(w.Amount)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -3718,6 +3858,10 @@ func (d *DepositRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
// Field (4) 'Index'
|
||||
hh.PutUint64(d.Index)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -35,7 +35,6 @@ load("//tools:ssz.bzl", "SSZ_DEPS", "ssz_gen_marshal")
|
||||
ssz_gen_marshal(
|
||||
name = "ssz_generated_files",
|
||||
go_proto = ":go_proto",
|
||||
out = "gateway.ssz.go",
|
||||
includes = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Code generated by fastssz. DO NOT EDIT.
|
||||
// Hash: 2874e1dadeb47411763f48fe31e5daaa91ac663e796933d9a508c2e7be94fa5e
|
||||
// Hash: d06a72227c2f5e350916cce3e89f4e855135a2a22f6ea263dedc68fa506c1ba7
|
||||
package v1
|
||||
|
||||
import (
|
||||
@@ -62,7 +62,7 @@ func (a *Attestation) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o0 != 228 {
|
||||
if o0 < 228 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -132,7 +132,11 @@ func (a *Attestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
hh.PutBytes(a.Signature)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -190,7 +194,7 @@ func (a *AggregateAttestationAndProof) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o1 != 108 {
|
||||
if o1 < 108 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -250,7 +254,11 @@ func (a *AggregateAttestationAndProof) HashTreeRootWith(hh *ssz.Hasher) (err err
|
||||
}
|
||||
hh.PutBytes(a.SelectionProof)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -302,7 +310,7 @@ func (s *SignedAggregateAttestationAndProof) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o0 != 100 {
|
||||
if o0 < 100 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -359,7 +367,11 @@ func (s *SignedAggregateAttestationAndProof) HashTreeRootWith(hh *ssz.Hasher) (e
|
||||
}
|
||||
hh.PutBytes(s.Signature)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -481,7 +493,11 @@ func (a *AttestationData) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -552,7 +568,11 @@ func (c *Checkpoint) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
hh.PutBytes(c.Root)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -635,7 +655,7 @@ func (b *BeaconBlock) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o4 != 84 {
|
||||
if o4 < 84 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -699,7 +719,11 @@ func (b *BeaconBlock) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -751,7 +775,7 @@ func (s *SignedBeaconBlock) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o0 != 100 {
|
||||
if o0 < 100 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -808,7 +832,11 @@ func (s *SignedBeaconBlock) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
hh.PutBytes(s.Signature)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -978,7 +1006,7 @@ func (b *BeaconBlockBody) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o3 != 220 {
|
||||
if o3 < 220 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -1171,7 +1199,11 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (4) 'AttesterSlashings'
|
||||
@@ -1187,7 +1219,11 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 2)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 2)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 2)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (5) 'Attestations'
|
||||
@@ -1203,7 +1239,11 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 128)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 128)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 128)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (6) 'Deposits'
|
||||
@@ -1219,7 +1259,11 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (7) 'VoluntaryExits'
|
||||
@@ -1235,10 +1279,18 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, num, 16)
|
||||
}
|
||||
}
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1322,7 +1374,11 @@ func (p *ProposerSlashing) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1379,7 +1435,7 @@ func (a *AttesterSlashing) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o0 != 8 {
|
||||
if o0 < 8 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -1450,7 +1506,11 @@ func (a *AttesterSlashing) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1544,7 +1604,12 @@ func (d *Deposit) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
hh.Append(i)
|
||||
}
|
||||
hh.Merkleize(subIndx)
|
||||
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(subIndx)
|
||||
} else {
|
||||
hh.Merkleize(subIndx)
|
||||
}
|
||||
}
|
||||
|
||||
// Field (1) 'Data'
|
||||
@@ -1552,7 +1617,11 @@ func (d *Deposit) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1612,7 +1681,11 @@ func (v *VoluntaryExit) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
// Field (1) 'ValidatorIndex'
|
||||
hh.PutUint64(uint64(v.ValidatorIndex))
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1695,7 +1768,11 @@ func (s *SignedVoluntaryExit) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
hh.PutBytes(s.Signature)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1786,7 +1863,11 @@ func (e *Eth1Data) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
hh.PutBytes(e.BlockHash)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1906,7 +1987,11 @@ func (b *BeaconBlockHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
hh.PutBytes(b.BodyRoot)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1989,7 +2074,11 @@ func (s *SignedBeaconBlockHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
hh.PutBytes(s.Signature)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -2050,7 +2139,7 @@ func (i *IndexedAttestation) UnmarshalSSZ(buf []byte) error {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o0 != 228 {
|
||||
if o0 < 228 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
@@ -2115,7 +2204,11 @@ func (i *IndexedAttestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
hh.FillUpTo32()
|
||||
|
||||
numItems := uint64(len(i.AttestingIndices))
|
||||
hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(2048, numItems, 8))
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeWithMixinVectorizedHTR(subIndx, numItems, ssz.CalculateLimit(2048, numItems, 8))
|
||||
} else {
|
||||
hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(2048, numItems, 8))
|
||||
}
|
||||
}
|
||||
|
||||
// Field (1) 'Data'
|
||||
@@ -2130,7 +2223,11 @@ func (i *IndexedAttestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
hh.PutBytes(i.Signature)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -2212,7 +2309,11 @@ func (s *SyncAggregate) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
hh.PutBytes(s.SyncCommitteeSignature)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -2323,7 +2424,11 @@ func (d *Deposit_Data) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
}
|
||||
hh.PutBytes(d.Signature)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -2459,6 +2564,10 @@ func (v *Validator) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
// Field (7) 'WithdrawableEpoch'
|
||||
hh.PutUint64(uint64(v.WithdrawableEpoch))
|
||||
|
||||
hh.Merkleize(indx)
|
||||
if ssz.EnableVectorizedHTR {
|
||||
hh.MerkleizeVectorizedHTR(indx)
|
||||
} else {
|
||||
hh.Merkleize(indx)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -31,7 +31,6 @@ load("//tools:ssz.bzl", "SSZ_DEPS", "ssz_gen_marshal")
|
||||
ssz_gen_marshal(
|
||||
name = "ssz_generated_files",
|
||||
go_proto = ":go_proto",
|
||||
out = "grpc.ssz.go",
|
||||
includes = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -12,7 +12,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
|
||||
load("//proto:ssz_proto_library.bzl", "ssz_proto_files")
|
||||
load("//tools:ssz.bzl", "SSZ_DEPS", "ssz_gen_marshal")
|
||||
load("//tools:methodical.bzl", "ssz_methodical")
|
||||
|
||||
proto_library(
|
||||
name = "proto",
|
||||
@@ -46,250 +45,111 @@ proto_library(
|
||||
##############################################################################
|
||||
# Go
|
||||
##############################################################################
|
||||
|
||||
ssz_phase0_objs = [
|
||||
"AggregateAttestationAndProof",
|
||||
"Attestation",
|
||||
"AttestationData",
|
||||
"AttesterSlashing",
|
||||
"BeaconBlock",
|
||||
"BeaconBlockBody",
|
||||
"BeaconBlockHeader",
|
||||
"BeaconState",
|
||||
"Checkpoint",
|
||||
"Deposit",
|
||||
"Deposit_Data",
|
||||
"DepositMessage",
|
||||
"ENRForkID",
|
||||
"Eth1Data",
|
||||
"Fork",
|
||||
"ForkData",
|
||||
"HistoricalBatch",
|
||||
"IndexedAttestation",
|
||||
"PendingAttestation",
|
||||
"PowBlock",
|
||||
"ProposerSlashing",
|
||||
"SignedAggregateAttestationAndProof",
|
||||
"SignedBeaconBlock",
|
||||
"SignedBeaconBlockHeader",
|
||||
"SignedVoluntaryExit",
|
||||
"SigningData",
|
||||
"Status",
|
||||
"Validator",
|
||||
"VoluntaryExit",
|
||||
]
|
||||
|
||||
ssz_altair_objs = [
|
||||
"BeaconBlockAltair",
|
||||
"BeaconBlockBodyAltair",
|
||||
"BeaconStateAltair",
|
||||
"ContributionAndProof",
|
||||
"SignedBeaconBlockAltair",
|
||||
"SignedContributionAndProof",
|
||||
"SyncAggregate",
|
||||
"SyncAggregatorSelectionData",
|
||||
"SyncCommittee",
|
||||
"SyncCommitteeContribution",
|
||||
"SyncCommitteeMessage",
|
||||
]
|
||||
|
||||
ssz_bellatrix_objs = [
|
||||
"BeaconBlockBellatrix",
|
||||
"BeaconBlockBodyBellatrix",
|
||||
"BeaconStateBellatrix",
|
||||
"BlindedBeaconBlockBellatrix",
|
||||
"BlindedBeaconBlockBodyBellatrix",
|
||||
"SignedBeaconBlockBellatrix",
|
||||
"SignedBlindedBeaconBlockBellatrix",
|
||||
]
|
||||
|
||||
ssz_capella_objs = [
|
||||
"BLSToExecutionChange",
|
||||
"BeaconBlockBodyCapella",
|
||||
"BeaconBlockCapella",
|
||||
"BeaconStateCapella",
|
||||
"BlindedBeaconBlockBodyCapella",
|
||||
"BlindedBeaconBlockCapella",
|
||||
"BuilderBidCapella",
|
||||
"HistoricalSummary",
|
||||
"SignedBLSToExecutionChange",
|
||||
"SignedBeaconBlockCapella",
|
||||
"SignedBlindedBeaconBlockCapella",
|
||||
]
|
||||
|
||||
ssz_deneb_objs = [
|
||||
"BeaconBlockBodyDeneb",
|
||||
"BeaconBlockContentsDeneb",
|
||||
"BeaconBlockDeneb",
|
||||
"BeaconStateDeneb",
|
||||
"BlindedBeaconBlockBodyDeneb",
|
||||
"BlindedBeaconBlockDeneb",
|
||||
"BlobIdentifier",
|
||||
"BlobSidecar",
|
||||
"BlobSidecars",
|
||||
"BuilderBidDeneb",
|
||||
"SignedBeaconBlockContentsDeneb",
|
||||
"SignedBeaconBlockDeneb",
|
||||
"SignedBlindedBeaconBlockDeneb",
|
||||
]
|
||||
|
||||
ssz_electra_objs = [
|
||||
"AggregateAttestationAndProofElectra",
|
||||
"AttestationElectra",
|
||||
"AttesterSlashingElectra",
|
||||
"BeaconBlockElectra",
|
||||
"BeaconBlockBodyElectra",
|
||||
"BeaconStateElectra",
|
||||
"BlindedBeaconBlockBodyElectra",
|
||||
"BlindedBeaconBlockElectra",
|
||||
"Consolidation",
|
||||
"IndexedAttestationElectra",
|
||||
"PendingBalanceDeposit",
|
||||
"PendingConsolidation",
|
||||
"PendingPartialWithdrawal",
|
||||
"SignedAggregateAttestationAndProofElectra",
|
||||
"SignedBeaconBlockElectra",
|
||||
"SignedBlindedBeaconBlockElectra",
|
||||
"SignedConsolidation",
|
||||
]
|
||||
|
||||
ssz_methodical(
|
||||
name = "methodical_phase0",
|
||||
deps = [":go_proto"],
|
||||
type_names = ssz_phase0_objs,
|
||||
target_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1",
|
||||
target_package_name = "eth",
|
||||
out = "phase0.ssz.go",
|
||||
)
|
||||
|
||||
ssz_methodical(
|
||||
name = "methodical_altair",
|
||||
deps = [":go_proto"],
|
||||
type_names = ssz_altair_objs,
|
||||
target_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1",
|
||||
target_package_name = "eth",
|
||||
out = "altair.ssz.go",
|
||||
)
|
||||
|
||||
ssz_methodical(
|
||||
name = "methodical_bellatrix",
|
||||
deps = [":go_proto"],
|
||||
type_names = ssz_bellatrix_objs,
|
||||
target_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1",
|
||||
target_package_name = "eth",
|
||||
out = "bellatrix.ssz.go",
|
||||
)
|
||||
|
||||
ssz_methodical(
|
||||
name = "methodical_capella",
|
||||
deps = [":go_proto"],
|
||||
type_names = ssz_capella_objs,
|
||||
target_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1",
|
||||
target_package_name = "eth",
|
||||
out = "capella.ssz.go",
|
||||
)
|
||||
|
||||
ssz_methodical(
|
||||
name = "methodical_deneb",
|
||||
deps = [":go_proto"],
|
||||
type_names = ssz_deneb_objs,
|
||||
target_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1",
|
||||
target_package_name = "eth",
|
||||
out = "deneb.ssz.go",
|
||||
)
|
||||
|
||||
ssz_methodical(
|
||||
name = "methodical_electra",
|
||||
deps = [":go_proto"],
|
||||
type_names = ssz_electra_objs,
|
||||
target_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1",
|
||||
target_package_name = "eth",
|
||||
out = "electra.ssz.go",
|
||||
)
|
||||
|
||||
#ssz_gen_marshal(
|
||||
# name = "ssz_generated_altair",
|
||||
# go_proto = ":go_proto",
|
||||
# out = "altair.ssz.go",
|
||||
# includes = [
|
||||
# "//consensus-types/primitives:go_default_library",
|
||||
# "//proto/engine/v1:go_default_library",
|
||||
# "//math:go_default_library",
|
||||
# ],
|
||||
# objs = ssz_altair_objs,
|
||||
# exclude_objs = ssz_phase0_objs,
|
||||
#)
|
||||
|
||||
#ssz_gen_marshal(
|
||||
# name = "ssz_generated_bellatrix",
|
||||
# go_proto = ":go_proto",
|
||||
# out = "bellatrix.ssz.go",
|
||||
# includes = [
|
||||
# "//consensus-types/primitives:go_default_library",
|
||||
# "//proto/engine/v1:go_default_library",
|
||||
# "//math:go_default_library",
|
||||
# ],
|
||||
# objs = ssz_bellatrix_objs,
|
||||
# exclude_objs = ssz_phase0_objs + ssz_altair_objs,
|
||||
#)
|
||||
|
||||
#ssz_gen_marshal(
|
||||
# name = "ssz_generated_capella",
|
||||
# go_proto = ":go_proto",
|
||||
# out = "capella.ssz.go",
|
||||
# includes = [
|
||||
# "//consensus-types/primitives:go_default_library",
|
||||
# "//proto/engine/v1:go_default_library",
|
||||
# "//math:go_default_library",
|
||||
# ],
|
||||
# objs = ssz_capella_objs,
|
||||
# exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs,
|
||||
#)
|
||||
|
||||
#ssz_gen_marshal(
|
||||
# name = "ssz_generated_deneb",
|
||||
# go_proto = ":go_proto",
|
||||
# out = "deneb.ssz.go",
|
||||
# includes = [
|
||||
# "//consensus-types/primitives:go_default_library",
|
||||
# "//proto/engine/v1:go_default_library",
|
||||
# "//math:go_default_library",
|
||||
# ],
|
||||
# objs = ssz_deneb_objs,
|
||||
# exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs + ssz_capella_objs,
|
||||
#)
|
||||
|
||||
#ssz_gen_marshal(
|
||||
# name = "ssz_generated_electra",
|
||||
# go_proto = ":go_proto",
|
||||
# out = "electra.ssz.go",
|
||||
# includes = [
|
||||
# "//consensus-types/primitives:go_default_library",
|
||||
# "//proto/engine/v1:go_default_library",
|
||||
# "//math:go_default_library",
|
||||
# ],
|
||||
# objs = ssz_electra_objs,
|
||||
# exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs + ssz_capella_objs + ssz_deneb_objs,
|
||||
#)
|
||||
|
||||
ssz_gen_marshal(
|
||||
name = "ssz_generated_non_core",
|
||||
name = "ssz_generated_files",
|
||||
go_proto = ":go_proto",
|
||||
out = "non-core.ssz.go",
|
||||
includes = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//math:go_default_library",
|
||||
],
|
||||
objs = [
|
||||
"BeaconBlockAltair",
|
||||
"BeaconBlockBodyAltair",
|
||||
"SignedBeaconBlockAltair",
|
||||
"BeaconBlockBellatrix",
|
||||
"BeaconBlockBodyBellatrix",
|
||||
"SignedBeaconBlockBellatrix",
|
||||
"SignedBlindedBeaconBlockBellatrix",
|
||||
"BlindedBeaconBlockBellatrix",
|
||||
"BlindedBeaconBlockBodyBellatrix",
|
||||
"BeaconBlockCapella",
|
||||
"BeaconBlockBodyCapella",
|
||||
"SignedBeaconBlockCapella",
|
||||
"BeaconBlockDeneb",
|
||||
"BeaconBlockBodyDeneb",
|
||||
"SignedBeaconBlockDeneb",
|
||||
"BeaconBlockElectra",
|
||||
"BeaconBlockElectra",
|
||||
"SignedBeaconBlockElectra",
|
||||
"SignedBlindedBeaconBlockCapella",
|
||||
"BlindedBeaconBlockCapella",
|
||||
"BlindedBeaconBlockBodyCapella",
|
||||
"SignedBlindedBeaconBlockDeneb",
|
||||
"BeaconBlockContentsDeneb",
|
||||
"SignedBeaconBlockContentsDeneb",
|
||||
"BlindedBeaconBlockDeneb",
|
||||
"BlindedBeaconBlockBodyDeneb",
|
||||
"SignedBlindedBeaconBlockElectra",
|
||||
"BlindedBeaconBlockElectra",
|
||||
"BlindedBeaconBlockBodyElectra",
|
||||
"SyncAggregate",
|
||||
"SyncCommitteeMessage",
|
||||
"SyncCommitteeContribution",
|
||||
"ContributionAndProof",
|
||||
"SignedContributionAndProof",
|
||||
"BeaconBlocksByRangeRequest",
|
||||
"BlobSidecarsByRangeRequest",
|
||||
"ENRForkID",
|
||||
"MetaDataV0",
|
||||
"MetaDataV1",
|
||||
"Status",
|
||||
"AggregateAttestationAndProof",
|
||||
"AggregateAttestationAndProofElectra",
|
||||
"Attestation",
|
||||
"AttestationElectra",
|
||||
"AttestationData",
|
||||
"AttesterSlashing",
|
||||
"AttesterSlashingElectra",
|
||||
"BeaconBlock",
|
||||
"BeaconBlockHeader",
|
||||
"Checkpoint",
|
||||
"Deposit",
|
||||
"Eth1Data",
|
||||
"IndexedAttestation",
|
||||
"IndexedAttestationElectra",
|
||||
"ProposerSlashing",
|
||||
"SignedAggregateAttestationAndProof",
|
||||
"SignedAggregateAttestationAndProofElectra",
|
||||
"SignedBeaconBlock",
|
||||
"SignedBeaconBlockHeader",
|
||||
"SignedVoluntaryExit",
|
||||
"Validator",
|
||||
"VoluntaryExit",
|
||||
"ContributionAndProof",
|
||||
"SignedContributionAndProof",
|
||||
"DepositMessage",
|
||||
"Fork",
|
||||
"ForkData",
|
||||
"HistoricalBatch",
|
||||
"Status",
|
||||
"BeaconState",
|
||||
"BeaconStateAltair",
|
||||
"BeaconStateBellatrix",
|
||||
"BeaconStateCapella",
|
||||
"BeaconStateDeneb",
|
||||
"BeaconStateElectra",
|
||||
"SigningData",
|
||||
"SyncCommittee",
|
||||
"SyncAggregatorSelectionData",
|
||||
"PowBlock",
|
||||
"SignedValidatorRegistrationV1",
|
||||
"ValidatorRegistrationV1",
|
||||
"Withdrawal",
|
||||
"BLSToExecutionChange",
|
||||
"SignedBLSToExecutionChange",
|
||||
"BuilderBid",
|
||||
"BuilderBidCapella",
|
||||
"BuilderBidDeneb",
|
||||
"BlobSidecar",
|
||||
"BlobSidecars",
|
||||
"BlobIdentifier",
|
||||
"DepositSnapshot",
|
||||
"PendingBalanceDeposit",
|
||||
"PendingPartialWithdrawal",
|
||||
"Consolidation",
|
||||
"SignedConsolidation",
|
||||
"PendingConsolidation",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -352,18 +212,7 @@ go_library(
|
||||
"eip_7251.go",
|
||||
"sync_committee_mainnet.go",
|
||||
"sync_committee_minimal.go", # keep
|
||||
":ssz_generated_non_core", # keep
|
||||
#":ssz_generated_altair", # keep
|
||||
#":ssz_generated_bellatrix", # keep
|
||||
#":ssz_generated_capella", # keep
|
||||
#":ssz_generated_deneb", # keep
|
||||
#":ssz_generated_electra", # keep
|
||||
":methodical_phase0", # keep
|
||||
":methodical_altair", # keep
|
||||
":methodical_bellatrix", # keep
|
||||
":methodical_capella", # keep
|
||||
":methodical_deneb", # keep
|
||||
":methodical_electra", # keep
|
||||
":ssz_generated_files", # keep
|
||||
],
|
||||
embed = [
|
||||
":go_grpc_gateway_library",
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
20713
proto/prysm/v1alpha1/generated.ssz.go
Normal file
20713
proto/prysm/v1alpha1/generated.ssz.go
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,780 +0,0 @@
|
||||
// Code generated by fastssz. DO NOT EDIT.
|
||||
// Hash: 9da8a498bd074553137a73197dadcae4d3b4239484f64bab4f0a734dce528d24
|
||||
package eth
|
||||
|
||||
import (
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
github_com_prysmaticlabs_prysm_v5_consensus_types_primitives "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
)
|
||||
|
||||
// MarshalSSZ ssz marshals the ValidatorRegistrationV1 object
|
||||
func (v *ValidatorRegistrationV1) MarshalSSZ() ([]byte, error) {
|
||||
return ssz.MarshalSSZ(v)
|
||||
}
|
||||
|
||||
// MarshalSSZTo ssz marshals the ValidatorRegistrationV1 object to a target array
|
||||
func (v *ValidatorRegistrationV1) MarshalSSZTo(buf []byte) (dst []byte, err error) {
|
||||
dst = buf
|
||||
|
||||
// Field (0) 'FeeRecipient'
|
||||
if size := len(v.FeeRecipient); size != 20 {
|
||||
err = ssz.ErrBytesLengthFn("--.FeeRecipient", size, 20)
|
||||
return
|
||||
}
|
||||
dst = append(dst, v.FeeRecipient...)
|
||||
|
||||
// Field (1) 'GasLimit'
|
||||
dst = ssz.MarshalUint64(dst, v.GasLimit)
|
||||
|
||||
// Field (2) 'Timestamp'
|
||||
dst = ssz.MarshalUint64(dst, v.Timestamp)
|
||||
|
||||
// Field (3) 'Pubkey'
|
||||
if size := len(v.Pubkey); size != 48 {
|
||||
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
|
||||
return
|
||||
}
|
||||
dst = append(dst, v.Pubkey...)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalSSZ ssz unmarshals the ValidatorRegistrationV1 object
|
||||
func (v *ValidatorRegistrationV1) UnmarshalSSZ(buf []byte) error {
|
||||
var err error
|
||||
size := uint64(len(buf))
|
||||
if size != 84 {
|
||||
return ssz.ErrSize
|
||||
}
|
||||
|
||||
// Field (0) 'FeeRecipient'
|
||||
if cap(v.FeeRecipient) == 0 {
|
||||
v.FeeRecipient = make([]byte, 0, len(buf[0:20]))
|
||||
}
|
||||
v.FeeRecipient = append(v.FeeRecipient, buf[0:20]...)
|
||||
|
||||
// Field (1) 'GasLimit'
|
||||
v.GasLimit = ssz.UnmarshallUint64(buf[20:28])
|
||||
|
||||
// Field (2) 'Timestamp'
|
||||
v.Timestamp = ssz.UnmarshallUint64(buf[28:36])
|
||||
|
||||
// Field (3) 'Pubkey'
|
||||
if cap(v.Pubkey) == 0 {
|
||||
v.Pubkey = make([]byte, 0, len(buf[36:84]))
|
||||
}
|
||||
v.Pubkey = append(v.Pubkey, buf[36:84]...)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the ValidatorRegistrationV1 object
|
||||
func (v *ValidatorRegistrationV1) SizeSSZ() (size int) {
|
||||
size = 84
|
||||
return
|
||||
}
|
||||
|
||||
// HashTreeRoot ssz hashes the ValidatorRegistrationV1 object
|
||||
func (v *ValidatorRegistrationV1) HashTreeRoot() ([32]byte, error) {
|
||||
return ssz.HashWithDefaultHasher(v)
|
||||
}
|
||||
|
||||
// HashTreeRootWith ssz hashes the ValidatorRegistrationV1 object with a hasher
|
||||
func (v *ValidatorRegistrationV1) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
indx := hh.Index()
|
||||
|
||||
// Field (0) 'FeeRecipient'
|
||||
if size := len(v.FeeRecipient); size != 20 {
|
||||
err = ssz.ErrBytesLengthFn("--.FeeRecipient", size, 20)
|
||||
return
|
||||
}
|
||||
hh.PutBytes(v.FeeRecipient)
|
||||
|
||||
// Field (1) 'GasLimit'
|
||||
hh.PutUint64(v.GasLimit)
|
||||
|
||||
// Field (2) 'Timestamp'
|
||||
hh.PutUint64(v.Timestamp)
|
||||
|
||||
// Field (3) 'Pubkey'
|
||||
if size := len(v.Pubkey); size != 48 {
|
||||
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
|
||||
return
|
||||
}
|
||||
hh.PutBytes(v.Pubkey)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalSSZ ssz marshals the SignedValidatorRegistrationV1 object
|
||||
func (s *SignedValidatorRegistrationV1) MarshalSSZ() ([]byte, error) {
|
||||
return ssz.MarshalSSZ(s)
|
||||
}
|
||||
|
||||
// MarshalSSZTo ssz marshals the SignedValidatorRegistrationV1 object to a target array
|
||||
func (s *SignedValidatorRegistrationV1) MarshalSSZTo(buf []byte) (dst []byte, err error) {
|
||||
dst = buf
|
||||
|
||||
// Field (0) 'Message'
|
||||
if s.Message == nil {
|
||||
s.Message = new(ValidatorRegistrationV1)
|
||||
}
|
||||
if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Field (1) 'Signature'
|
||||
if size := len(s.Signature); size != 96 {
|
||||
err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
|
||||
return
|
||||
}
|
||||
dst = append(dst, s.Signature...)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalSSZ ssz unmarshals the SignedValidatorRegistrationV1 object
|
||||
func (s *SignedValidatorRegistrationV1) UnmarshalSSZ(buf []byte) error {
|
||||
var err error
|
||||
size := uint64(len(buf))
|
||||
if size != 180 {
|
||||
return ssz.ErrSize
|
||||
}
|
||||
|
||||
// Field (0) 'Message'
|
||||
if s.Message == nil {
|
||||
s.Message = new(ValidatorRegistrationV1)
|
||||
}
|
||||
if err = s.Message.UnmarshalSSZ(buf[0:84]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Field (1) 'Signature'
|
||||
if cap(s.Signature) == 0 {
|
||||
s.Signature = make([]byte, 0, len(buf[84:180]))
|
||||
}
|
||||
s.Signature = append(s.Signature, buf[84:180]...)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the SignedValidatorRegistrationV1 object
|
||||
func (s *SignedValidatorRegistrationV1) SizeSSZ() (size int) {
|
||||
size = 180
|
||||
return
|
||||
}
|
||||
|
||||
// HashTreeRoot ssz hashes the SignedValidatorRegistrationV1 object
|
||||
func (s *SignedValidatorRegistrationV1) HashTreeRoot() ([32]byte, error) {
|
||||
return ssz.HashWithDefaultHasher(s)
|
||||
}
|
||||
|
||||
// HashTreeRootWith ssz hashes the SignedValidatorRegistrationV1 object with a hasher
|
||||
func (s *SignedValidatorRegistrationV1) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
indx := hh.Index()
|
||||
|
||||
// Field (0) 'Message'
|
||||
if err = s.Message.HashTreeRootWith(hh); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Field (1) 'Signature'
|
||||
if size := len(s.Signature); size != 96 {
|
||||
err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
|
||||
return
|
||||
}
|
||||
hh.PutBytes(s.Signature)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalSSZ ssz marshals the BuilderBid object
|
||||
func (b *BuilderBid) MarshalSSZ() ([]byte, error) {
|
||||
return ssz.MarshalSSZ(b)
|
||||
}
|
||||
|
||||
// MarshalSSZTo ssz marshals the BuilderBid object to a target array
|
||||
func (b *BuilderBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
|
||||
dst = buf
|
||||
offset := int(84)
|
||||
|
||||
// Offset (0) 'Header'
|
||||
dst = ssz.WriteOffset(dst, offset)
|
||||
if b.Header == nil {
|
||||
b.Header = new(v1.ExecutionPayloadHeader)
|
||||
}
|
||||
offset += b.Header.SizeSSZ()
|
||||
|
||||
// Field (1) 'Value'
|
||||
if size := len(b.Value); size != 32 {
|
||||
err = ssz.ErrBytesLengthFn("--.Value", size, 32)
|
||||
return
|
||||
}
|
||||
dst = append(dst, b.Value...)
|
||||
|
||||
// Field (2) 'Pubkey'
|
||||
if size := len(b.Pubkey); size != 48 {
|
||||
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
|
||||
return
|
||||
}
|
||||
dst = append(dst, b.Pubkey...)
|
||||
|
||||
// Field (0) 'Header'
|
||||
if dst, err = b.Header.MarshalSSZTo(dst); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalSSZ ssz unmarshals the BuilderBid object
|
||||
func (b *BuilderBid) UnmarshalSSZ(buf []byte) error {
|
||||
var err error
|
||||
size := uint64(len(buf))
|
||||
if size < 84 {
|
||||
return ssz.ErrSize
|
||||
}
|
||||
|
||||
tail := buf
|
||||
var o0 uint64
|
||||
|
||||
// Offset (0) 'Header'
|
||||
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o0 != 84 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
// Field (1) 'Value'
|
||||
if cap(b.Value) == 0 {
|
||||
b.Value = make([]byte, 0, len(buf[4:36]))
|
||||
}
|
||||
b.Value = append(b.Value, buf[4:36]...)
|
||||
|
||||
// Field (2) 'Pubkey'
|
||||
if cap(b.Pubkey) == 0 {
|
||||
b.Pubkey = make([]byte, 0, len(buf[36:84]))
|
||||
}
|
||||
b.Pubkey = append(b.Pubkey, buf[36:84]...)
|
||||
|
||||
// Field (0) 'Header'
|
||||
{
|
||||
buf = tail[o0:]
|
||||
if b.Header == nil {
|
||||
b.Header = new(v1.ExecutionPayloadHeader)
|
||||
}
|
||||
if err = b.Header.UnmarshalSSZ(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the BuilderBid object.
func (b *BuilderBid) SizeSSZ() (size int) {
	// Fixed part: 4-byte Header offset + 32-byte Value + 48-byte Pubkey.
	size = 84

	// Field (0) 'Header'
	// Lazily allocate so an empty header's size is still counted.
	if b.Header == nil {
		b.Header = new(v1.ExecutionPayloadHeader)
	}
	size += b.Header.SizeSSZ()

	return
}
|
||||
|
||||
// HashTreeRoot ssz hashes the BuilderBid object.
// It delegates to ssz.HashWithDefaultHasher, which drives HashTreeRootWith.
func (b *BuilderBid) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(b)
}
|
||||
|
||||
// HashTreeRootWith ssz hashes the BuilderBid object with a hasher.
func (b *BuilderBid) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()

	// Field (0) 'Header'
	// NOTE(review): unlike SizeSSZ/MarshalSSZTo, a nil Header is not
	// lazily allocated here, so hashing a zero-value BuilderBid can
	// dereference nil — confirm callers always populate Header.
	if err = b.Header.HashTreeRootWith(hh); err != nil {
		return
	}

	// Field (1) 'Value' — must be exactly 32 bytes.
	if size := len(b.Value); size != 32 {
		err = ssz.ErrBytesLengthFn("--.Value", size, 32)
		return
	}
	hh.PutBytes(b.Value)

	// Field (2) 'Pubkey' — must be exactly 48 bytes.
	if size := len(b.Pubkey); size != 48 {
		err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
		return
	}
	hh.PutBytes(b.Pubkey)

	hh.Merkleize(indx)
	return
}
|
||||
|
||||
// MarshalSSZ ssz marshals the BeaconBlocksByRangeRequest object.
// It delegates to ssz.MarshalSSZ, which sizes a buffer via SizeSSZ and
// fills it via MarshalSSZTo.
func (b *BeaconBlocksByRangeRequest) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(b)
}
|
||||
|
||||
// MarshalSSZTo ssz marshals the BeaconBlocksByRangeRequest object to a target array.
// Layout: StartSlot (8 bytes) | Count (8 bytes) | Step (8 bytes).
func (b *BeaconBlocksByRangeRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf

	// Field (0) 'StartSlot'
	dst = ssz.MarshalUint64(dst, uint64(b.StartSlot))

	// Field (1) 'Count'
	dst = ssz.MarshalUint64(dst, b.Count)

	// Field (2) 'Step'
	dst = ssz.MarshalUint64(dst, b.Step)

	return
}
|
||||
|
||||
// UnmarshalSSZ ssz unmarshals the BeaconBlocksByRangeRequest object.
// The buffer must be exactly 24 bytes: three fixed-width uint64 fields.
func (b *BeaconBlocksByRangeRequest) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size != 24 {
		return ssz.ErrSize
	}

	// Field (0) 'StartSlot'
	b.StartSlot = github_com_prysmaticlabs_prysm_v5_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[0:8]))

	// Field (1) 'Count'
	b.Count = ssz.UnmarshallUint64(buf[8:16])

	// Field (2) 'Step'
	b.Step = ssz.UnmarshallUint64(buf[16:24])

	return err
}
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlocksByRangeRequest object
|
||||
func (b *BeaconBlocksByRangeRequest) SizeSSZ() (size int) {
|
||||
size = 24
|
||||
return
|
||||
}
|
||||
|
||||
// HashTreeRoot ssz hashes the BeaconBlocksByRangeRequest object.
// It delegates to ssz.HashWithDefaultHasher, which drives HashTreeRootWith.
func (b *BeaconBlocksByRangeRequest) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(b)
}
|
||||
|
||||
// HashTreeRootWith ssz hashes the BeaconBlocksByRangeRequest object with a hasher.
// The three uint64 fields become three leaves merkleized into one root.
func (b *BeaconBlocksByRangeRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()

	// Field (0) 'StartSlot'
	hh.PutUint64(uint64(b.StartSlot))

	// Field (1) 'Count'
	hh.PutUint64(b.Count)

	// Field (2) 'Step'
	hh.PutUint64(b.Step)

	hh.Merkleize(indx)
	return
}
|
||||
|
||||
// MarshalSSZ ssz marshals the MetaDataV0 object.
// It delegates to ssz.MarshalSSZ, which sizes a buffer via SizeSSZ and
// fills it via MarshalSSZTo.
func (m *MetaDataV0) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(m)
}
|
||||
|
||||
// MarshalSSZTo ssz marshals the MetaDataV0 object to a target array.
// Layout: SeqNumber (8 bytes) | Attnets (8 bytes).
func (m *MetaDataV0) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf

	// Field (0) 'SeqNumber'
	dst = ssz.MarshalUint64(dst, m.SeqNumber)

	// Field (1) 'Attnets' — must be exactly 8 bytes.
	if size := len(m.Attnets); size != 8 {
		err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
		return
	}
	dst = append(dst, m.Attnets...)

	return
}
|
||||
|
||||
// UnmarshalSSZ ssz unmarshals the MetaDataV0 object.
// The buffer must be exactly 16 bytes.
func (m *MetaDataV0) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size != 16 {
		return ssz.ErrSize
	}

	// Field (0) 'SeqNumber'
	m.SeqNumber = ssz.UnmarshallUint64(buf[0:8])

	// Field (1) 'Attnets'
	// NOTE(review): append without truncation assumes m.Attnets starts
	// empty; unmarshaling twice into the same object accumulates bytes.
	if cap(m.Attnets) == 0 {
		m.Attnets = make([]byte, 0, len(buf[8:16]))
	}
	m.Attnets = append(m.Attnets, buf[8:16]...)

	return err
}
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the MetaDataV0 object
|
||||
func (m *MetaDataV0) SizeSSZ() (size int) {
|
||||
size = 16
|
||||
return
|
||||
}
|
||||
|
||||
// HashTreeRoot ssz hashes the MetaDataV0 object.
// It delegates to ssz.HashWithDefaultHasher, which drives HashTreeRootWith.
func (m *MetaDataV0) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(m)
}
|
||||
|
||||
// HashTreeRootWith ssz hashes the MetaDataV0 object with a hasher.
func (m *MetaDataV0) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()

	// Field (0) 'SeqNumber'
	hh.PutUint64(m.SeqNumber)

	// Field (1) 'Attnets' — must be exactly 8 bytes.
	if size := len(m.Attnets); size != 8 {
		err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
		return
	}
	hh.PutBytes(m.Attnets)

	hh.Merkleize(indx)
	return
}
|
||||
|
||||
// MarshalSSZ ssz marshals the MetaDataV1 object.
// It delegates to ssz.MarshalSSZ, which sizes a buffer via SizeSSZ and
// fills it via MarshalSSZTo.
func (m *MetaDataV1) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(m)
}
|
||||
|
||||
// MarshalSSZTo ssz marshals the MetaDataV1 object to a target array.
// Layout: SeqNumber (8 bytes) | Attnets (8 bytes) | Syncnets (1 byte).
func (m *MetaDataV1) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf

	// Field (0) 'SeqNumber'
	dst = ssz.MarshalUint64(dst, m.SeqNumber)

	// Field (1) 'Attnets' — must be exactly 8 bytes.
	if size := len(m.Attnets); size != 8 {
		err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
		return
	}
	dst = append(dst, m.Attnets...)

	// Field (2) 'Syncnets' — must be exactly 1 byte.
	if size := len(m.Syncnets); size != 1 {
		err = ssz.ErrBytesLengthFn("--.Syncnets", size, 1)
		return
	}
	dst = append(dst, m.Syncnets...)

	return
}
|
||||
|
||||
// UnmarshalSSZ ssz unmarshals the MetaDataV1 object.
// The buffer must be exactly 17 bytes: SeqNumber (8) | Attnets (8) | Syncnets (1).
func (m *MetaDataV1) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size != 17 {
		return ssz.ErrSize
	}

	// Field (0) 'SeqNumber'
	m.SeqNumber = ssz.UnmarshallUint64(buf[0:8])

	// Field (1) 'Attnets'
	// NOTE(review): append without truncation assumes the slice starts
	// empty; unmarshaling twice into the same object accumulates bytes.
	if cap(m.Attnets) == 0 {
		m.Attnets = make([]byte, 0, len(buf[8:16]))
	}
	m.Attnets = append(m.Attnets, buf[8:16]...)

	// Field (2) 'Syncnets'
	if cap(m.Syncnets) == 0 {
		m.Syncnets = make([]byte, 0, len(buf[16:17]))
	}
	m.Syncnets = append(m.Syncnets, buf[16:17]...)

	return err
}
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the MetaDataV1 object
|
||||
func (m *MetaDataV1) SizeSSZ() (size int) {
|
||||
size = 17
|
||||
return
|
||||
}
|
||||
|
||||
// HashTreeRoot ssz hashes the MetaDataV1 object.
// It delegates to ssz.HashWithDefaultHasher, which drives HashTreeRootWith.
func (m *MetaDataV1) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(m)
}
|
||||
|
||||
// HashTreeRootWith ssz hashes the MetaDataV1 object with a hasher.
func (m *MetaDataV1) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()

	// Field (0) 'SeqNumber'
	hh.PutUint64(m.SeqNumber)

	// Field (1) 'Attnets' — must be exactly 8 bytes.
	if size := len(m.Attnets); size != 8 {
		err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
		return
	}
	hh.PutBytes(m.Attnets)

	// Field (2) 'Syncnets' — must be exactly 1 byte.
	if size := len(m.Syncnets); size != 1 {
		err = ssz.ErrBytesLengthFn("--.Syncnets", size, 1)
		return
	}
	hh.PutBytes(m.Syncnets)

	hh.Merkleize(indx)
	return
}
|
||||
|
||||
// MarshalSSZ ssz marshals the BlobSidecarsByRangeRequest object.
// It delegates to ssz.MarshalSSZ, which sizes a buffer via SizeSSZ and
// fills it via MarshalSSZTo.
func (b *BlobSidecarsByRangeRequest) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(b)
}
|
||||
|
||||
// MarshalSSZTo ssz marshals the BlobSidecarsByRangeRequest object to a target array.
// Layout: StartSlot (8 bytes) | Count (8 bytes).
func (b *BlobSidecarsByRangeRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf

	// Field (0) 'StartSlot'
	dst = ssz.MarshalUint64(dst, uint64(b.StartSlot))

	// Field (1) 'Count'
	dst = ssz.MarshalUint64(dst, b.Count)

	return
}
|
||||
|
||||
// UnmarshalSSZ ssz unmarshals the BlobSidecarsByRangeRequest object.
// The buffer must be exactly 16 bytes: two fixed-width uint64 fields.
func (b *BlobSidecarsByRangeRequest) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size != 16 {
		return ssz.ErrSize
	}

	// Field (0) 'StartSlot'
	b.StartSlot = github_com_prysmaticlabs_prysm_v5_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[0:8]))

	// Field (1) 'Count'
	b.Count = ssz.UnmarshallUint64(buf[8:16])

	return err
}
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the BlobSidecarsByRangeRequest object
|
||||
func (b *BlobSidecarsByRangeRequest) SizeSSZ() (size int) {
|
||||
size = 16
|
||||
return
|
||||
}
|
||||
|
||||
// HashTreeRoot ssz hashes the BlobSidecarsByRangeRequest object.
// It delegates to ssz.HashWithDefaultHasher, which drives HashTreeRootWith.
func (b *BlobSidecarsByRangeRequest) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(b)
}
|
||||
|
||||
// HashTreeRootWith ssz hashes the BlobSidecarsByRangeRequest object with a hasher.
func (b *BlobSidecarsByRangeRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()

	// Field (0) 'StartSlot'
	hh.PutUint64(uint64(b.StartSlot))

	// Field (1) 'Count'
	hh.PutUint64(b.Count)

	hh.Merkleize(indx)
	return
}
|
||||
|
||||
// MarshalSSZ ssz marshals the DepositSnapshot object.
// It delegates to ssz.MarshalSSZ, which sizes a buffer via SizeSSZ and
// fills it via MarshalSSZTo.
func (d *DepositSnapshot) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(d)
}
|
||||
|
||||
// MarshalSSZTo ssz marshals the DepositSnapshot object to a target array.
// Fixed part (84 bytes): Finalized offset (4) | DepositRoot (32) |
// DepositCount (8) | ExecutionHash (32) | ExecutionDepth (8); the
// variable-size Finalized list of 32-byte roots follows.
func (d *DepositSnapshot) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf
	offset := int(84)

	// Offset (0) 'Finalized' — points just past the fixed part.
	dst = ssz.WriteOffset(dst, offset)
	offset += len(d.Finalized) * 32

	// Field (1) 'DepositRoot' — must be exactly 32 bytes.
	if size := len(d.DepositRoot); size != 32 {
		err = ssz.ErrBytesLengthFn("--.DepositRoot", size, 32)
		return
	}
	dst = append(dst, d.DepositRoot...)

	// Field (2) 'DepositCount'
	dst = ssz.MarshalUint64(dst, d.DepositCount)

	// Field (3) 'ExecutionHash' — must be exactly 32 bytes.
	if size := len(d.ExecutionHash); size != 32 {
		err = ssz.ErrBytesLengthFn("--.ExecutionHash", size, 32)
		return
	}
	dst = append(dst, d.ExecutionHash...)

	// Field (4) 'ExecutionDepth'
	dst = ssz.MarshalUint64(dst, d.ExecutionDepth)

	// Field (0) 'Finalized' — at most 32 entries, each a 32-byte root.
	if size := len(d.Finalized); size > 32 {
		err = ssz.ErrListTooBigFn("--.Finalized", size, 32)
		return
	}
	for ii := 0; ii < len(d.Finalized); ii++ {
		if size := len(d.Finalized[ii]); size != 32 {
			err = ssz.ErrBytesLengthFn("--.Finalized[ii]", size, 32)
			return
		}
		dst = append(dst, d.Finalized[ii]...)
	}

	return
}
|
||||
|
||||
// UnmarshalSSZ ssz unmarshals the DepositSnapshot object.
// Expects at least the 84-byte fixed part, followed by the Finalized
// list whose byte length must be a multiple of 32.
func (d *DepositSnapshot) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size < 84 {
		return ssz.ErrSize
	}

	tail := buf
	var o0 uint64

	// Offset (0) 'Finalized'
	if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
		return ssz.ErrOffset
	}

	// The only variable field must start directly after the fixed part.
	if o0 != 84 {
		return ssz.ErrInvalidVariableOffset
	}

	// Field (1) 'DepositRoot'
	// NOTE(review): append without truncation assumes the slice starts
	// empty; unmarshaling twice into the same object accumulates bytes.
	if cap(d.DepositRoot) == 0 {
		d.DepositRoot = make([]byte, 0, len(buf[4:36]))
	}
	d.DepositRoot = append(d.DepositRoot, buf[4:36]...)

	// Field (2) 'DepositCount'
	d.DepositCount = ssz.UnmarshallUint64(buf[36:44])

	// Field (3) 'ExecutionHash'
	if cap(d.ExecutionHash) == 0 {
		d.ExecutionHash = make([]byte, 0, len(buf[44:76]))
	}
	d.ExecutionHash = append(d.ExecutionHash, buf[44:76]...)

	// Field (4) 'ExecutionDepth'
	d.ExecutionDepth = ssz.UnmarshallUint64(buf[76:84])

	// Field (0) 'Finalized'
	{
		buf = tail[o0:]
		// err is shadowed inside this block; failures return early, so
		// the trailing `return err` always returns the outer nil value.
		num, err := ssz.DivideInt2(len(buf), 32, 32)
		if err != nil {
			return err
		}
		d.Finalized = make([][]byte, num)
		for ii := 0; ii < num; ii++ {
			if cap(d.Finalized[ii]) == 0 {
				d.Finalized[ii] = make([]byte, 0, len(buf[ii*32:(ii+1)*32]))
			}
			d.Finalized[ii] = append(d.Finalized[ii], buf[ii*32:(ii+1)*32]...)
		}
	}
	return err
}
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the DepositSnapshot object.
func (d *DepositSnapshot) SizeSSZ() (size int) {
	// Fixed part: offset (4) + DepositRoot (32) + DepositCount (8) +
	// ExecutionHash (32) + ExecutionDepth (8).
	size = 84

	// Field (0) 'Finalized' — one 32-byte root per entry.
	size += len(d.Finalized) * 32

	return
}
|
||||
|
||||
// HashTreeRoot ssz hashes the DepositSnapshot object.
// It delegates to ssz.HashWithDefaultHasher, which drives HashTreeRootWith.
func (d *DepositSnapshot) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(d)
}
|
||||
|
||||
// HashTreeRootWith ssz hashes the DepositSnapshot object with a hasher.
func (d *DepositSnapshot) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()

	// Field (0) 'Finalized' — list of up to 32 roots, merkleized with a
	// length mix-in.
	{
		if size := len(d.Finalized); size > 32 {
			err = ssz.ErrListTooBigFn("--.Finalized", size, 32)
			return
		}
		subIndx := hh.Index()
		for _, i := range d.Finalized {
			if len(i) != 32 {
				err = ssz.ErrBytesLength
				return
			}
			hh.Append(i)
		}

		numItems := uint64(len(d.Finalized))
		hh.MerkleizeWithMixin(subIndx, numItems, 32)
	}

	// Field (1) 'DepositRoot' — must be exactly 32 bytes.
	if size := len(d.DepositRoot); size != 32 {
		err = ssz.ErrBytesLengthFn("--.DepositRoot", size, 32)
		return
	}
	hh.PutBytes(d.DepositRoot)

	// Field (2) 'DepositCount'
	hh.PutUint64(d.DepositCount)

	// Field (3) 'ExecutionHash' — must be exactly 32 bytes.
	if size := len(d.ExecutionHash); size != 32 {
		err = ssz.ErrBytesLengthFn("--.ExecutionHash", size, 32)
		return
	}
	hh.PutBytes(d.ExecutionHash)

	// Field (4) 'ExecutionDepth'
	hh.PutUint64(d.ExecutionDepth)

	hh.Merkleize(indx)
	return
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,38 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
alias(
|
||||
name = "methodicalgen",
|
||||
actual = "@com_github_offchainlabs_methodical_ssz//cmd/ssz:ssz",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"bazel.go",
|
||||
"bazel_json_builder.go",
|
||||
"build_context.go",
|
||||
"driver_request.go",
|
||||
"flatpackage.go",
|
||||
"json_packages_driver.go",
|
||||
"main.go",
|
||||
"packageregistry.go",
|
||||
"utils.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/tools/genception",
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "genception",
|
||||
embed = [":go_default_library"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["gopackagesdriver_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = ["@io_bazel_rules_go//go/tools/bazel_testing:go_default_library"],
|
||||
)
|
||||
@@ -1,16 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary")
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["main.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/tools/genception/cmd",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["//tools/genception/driver:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "cmd",
|
||||
embed = [":go_default_library"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,86 +0,0 @@
|
||||
// Copyright 2021 The Bazel Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/tools/genception/driver"
|
||||
)
|
||||
|
||||
var log = driver.Logger
|
||||
|
||||
func run(_ context.Context, in io.Reader, out io.Writer, args []string) error {
|
||||
rec, err := driver.NewRecorder()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to initialize recorder: %w", err)
|
||||
}
|
||||
resolver, err := driver.NewPathResolver()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to initialize path resolver: %w", err)
|
||||
}
|
||||
jsonFiles, err := driver.LoadJsonListing()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to lookup package: %w", err)
|
||||
}
|
||||
pd, err := driver.NewJSONPackagesDriver(jsonFiles, resolver.Resolve)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load JSON files: %w", err)
|
||||
}
|
||||
|
||||
request, err := driver.ReadDriverRequest(in)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to read request: %w", err)
|
||||
}
|
||||
if err := rec.RecordRequest(args, request); err != nil {
|
||||
return fmt.Errorf("unable to record request: %w", err)
|
||||
}
|
||||
// Note: we are returning all files required to build a specific package.
|
||||
// For file queries (`file=`), this means that the CompiledGoFiles will
|
||||
// include more than the only file being specified.
|
||||
resp := pd.Handle(request, args)
|
||||
if err := rec.RecordResponse(resp); err != nil {
|
||||
return fmt.Errorf("unable to record response: %w", err)
|
||||
}
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to marshal response: %v", err)
|
||||
}
|
||||
_, err = out.Write(data)
|
||||
return err
|
||||
}
|
||||
|
||||
// main is the gopls packages-driver entry point: it reads a driver
// request on stdin, answers on stdout, and logs the raw argv for
// debugging.
func main() {
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	log.WithField("args", strings.Join(os.Args[1:], " ")).Info("genception lookup")
	if err := run(ctx, os.Stdin, os.Stdout, os.Args[1:]); err != nil {
		_, err := fmt.Fprintf(os.Stderr, "error: %v", err)
		if err != nil {
			log.WithError(err).Error("unhandled error in package resolution")
		}
		// gopls will check the packages driver exit code, and if there is an
		// error, it will fall back to go list. Obviously we don't want that,
		// so force a 0 exit code.
		os.Exit(0)
	}
}
|
||||
@@ -1,33 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"bazel_json_builder.go",
|
||||
"build_context.go",
|
||||
"driver_request.go",
|
||||
"flatpackage.go",
|
||||
"index.go",
|
||||
"json_packages_driver.go",
|
||||
"logger.go",
|
||||
"packageregistry.go",
|
||||
"recorder.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/tools/genception/driver",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"index_test.go",
|
||||
"packageregistry_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
deps = ["//testing/require:go_default_library"],
|
||||
)
|
||||
@@ -1,156 +0,0 @@
|
||||
// Copyright 2021 The Bazel Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var RulesGoStdlibLabel = "@io_bazel_rules_go//:stdlib"
|
||||
|
||||
/*
|
||||
type BazelJSONBuilder struct {
|
||||
packagesBaseDir string
|
||||
includeTests bool
|
||||
}
|
||||
|
||||
|
||||
var _defaultKinds = []string{"go_library", "go_test", "go_binary"}
|
||||
|
||||
var externalRe = regexp.MustCompile(`.*\/external\/([^\/]+)(\/(.*))?\/([^\/]+.go)`)
|
||||
|
||||
func (b *BazelJSONBuilder) fileQuery(filename string) string {
|
||||
label := filename
|
||||
|
||||
if strings.HasPrefix(filename, "./") {
|
||||
label = strings.TrimPrefix(filename, "./")
|
||||
}
|
||||
|
||||
if matches := externalRe.FindStringSubmatch(filename); len(matches) == 5 {
|
||||
// if filepath is for a third party lib, we need to know, what external
|
||||
// library this file is part of.
|
||||
matches = append(matches[:2], matches[3:]...)
|
||||
label = fmt.Sprintf("@%s//%s", matches[1], strings.Join(matches[2:], ":"))
|
||||
}
|
||||
|
||||
relToBin, err := filepath.Rel(b.bazel.info["output_path"], filename)
|
||||
if err == nil && !strings.HasPrefix(relToBin, "../") {
|
||||
parts := strings.SplitN(relToBin, string(filepath.Separator), 3)
|
||||
relToBin = parts[2]
|
||||
// We've effectively converted filename from bazel-bin/some/path.go to some/path.go;
|
||||
// Check if a BUILD.bazel files exists under this dir, if not walk up and repeat.
|
||||
relToBin = filepath.Dir(relToBin)
|
||||
_, err = os.Stat(filepath.Join(b.bazel.WorkspaceRoot(), relToBin, "BUILD.bazel"))
|
||||
for errors.Is(err, os.ErrNotExist) && relToBin != "." {
|
||||
relToBin = filepath.Dir(relToBin)
|
||||
_, err = os.Stat(filepath.Join(b.bazel.WorkspaceRoot(), relToBin, "BUILD.bazel"))
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// return package path found and build all targets (codegen doesn't fall under go_library)
|
||||
// Otherwise fallback to default
|
||||
if relToBin == "." {
|
||||
relToBin = ""
|
||||
}
|
||||
label = fmt.Sprintf("//%s:all", relToBin)
|
||||
}
|
||||
}
|
||||
|
||||
return label
|
||||
}
|
||||
|
||||
func isLocalImport(path string) bool {
|
||||
return path == "." || path == ".." ||
|
||||
strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../") ||
|
||||
filepath.IsAbs(path)
|
||||
}
|
||||
|
||||
func NewBazelJSONBuilder(includeTests bool) (*BazelJSONBuilder, error) {
|
||||
return &BazelJSONBuilder{
|
||||
includeTests: includeTests,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *BazelJSONBuilder) Labels(ctx context.Context, requests []string) ([]string, error) {
|
||||
ret := make([]string, 0, len(requests))
|
||||
for _, request := range requests {
|
||||
result := ""
|
||||
if strings.HasSuffix(request, ".go") {
|
||||
f := strings.TrimPrefix(request, "file=")
|
||||
result = b.fileQuery(f)
|
||||
} else if request == "builtin" || request == "std" {
|
||||
result = fmt.Sprintf(RulesGoStdlibLabel)
|
||||
}
|
||||
|
||||
if result != "" {
|
||||
ret = append(ret, result)
|
||||
}
|
||||
}
|
||||
if len(ret) == 0 {
|
||||
return []string{RulesGoStdlibLabel}, nil
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (b *BazelJSONBuilder) PathResolver() PathResolverFunc {
|
||||
return func(p string) string {
|
||||
p = strings.Replace(p, "__BAZEL_EXECROOT__", os.Getenv("PWD"), 1)
|
||||
p = strings.Replace(p, "__BAZEL_OUTPUT_BASE__", b.packagesBaseDir, 1)
|
||||
return p
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
// NewPathResolver builds a PathResolver anchored at the Bazel exec root
// (taken from $PWD) and the output base derived from the environment
// via PackagesBaseFromEnv.
func NewPathResolver() (*PathResolver, error) {
	outBase, err := PackagesBaseFromEnv()
	if err != nil {
		return nil, err
	}
	return &PathResolver{
		execRoot:   os.Getenv("PWD"),
		outputBase: outBase,
	}, nil
}
|
||||
|
||||
// PathResolver rewrites Bazel placeholder path prefixes found in JSON
// package metadata into on-disk paths.
type PathResolver struct {
	outputBase string // Bazel output base directory
	execRoot   string // Bazel execution root (taken from $PWD)
}

// Placeholder prefixes that may appear in generated path metadata.
const (
	prefixExecRoot   = "__BAZEL_EXECROOT__"
	prefixOutputBase = "__BAZEL_OUTPUT_BASE__"
	prefixWorkspace  = "__BAZEL_WORKSPACE__"
)

// prefixes is the recognition list scanned by Resolve.
var prefixes = []string{prefixExecRoot, prefixOutputBase, prefixWorkspace}
|
||||
|
||||
// Resolve replaces a recognized placeholder prefix in path with the
// first of (execRoot, outputBase) whose substitution names an existing
// file. If the prefix is recognized but neither candidate exists, the
// path is returned unchanged; an unknown prefix is logged and the path
// is also returned unchanged.
func (r PathResolver) Resolve(path string) string {
	for _, prefix := range prefixes {
		if strings.HasPrefix(path, prefix) {
			for _, rpl := range []string{r.execRoot, r.outputBase} {
				rp := strings.Replace(path, prefix, rpl, 1)
				_, err := os.Stat(rp)
				if err == nil {
					return rp
				}
			}
			return path
		}
	}
	log.WithField("path", path).Warn("unrecognized path prefix when resolving source paths in json import metadata")
	return path
}
|
||||
@@ -1,44 +0,0 @@
|
||||
package driver
|
||||
|
||||
import (
|
||||
"go/build"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// buildContext is the shared build.Context used by this package to
// match source files against the configured build tags.
var buildContext = makeBuildContext()

// makeBuildContext copies build.Default and applies the comma-separated
// build tags from the GOTAGS environment variable.
func makeBuildContext() *build.Context {
	bctx := build.Default
	bctx.BuildTags = strings.Split(getenvDefault("GOTAGS", ""), ",")

	return &bctx
}
|
||||
|
||||
// filterSourceFilesForTags returns the subset of files accepted by the
// package build context (build tags etc.). Files without an extension
// are kept unconditionally, since compiled Go files from the cache may
// lack one. Match errors are logged, not fatal.
func filterSourceFilesForTags(files []string) []string {
	ret := make([]string, 0, len(files))

	for _, f := range files {
		dir, filename := filepath.Split(f)
		ext := filepath.Ext(f)

		match, err := buildContext.MatchFile(dir, filename)
		if err != nil {
			log.WithError(err).WithField("file", f).Warn("error matching file")
		}
		// MatchFile filters out anything without a file extension. In the
		// case of CompiledGoFiles (in particular gco processed files from
		// the cache), we want them.
		if match || ext == "" {
			ret = append(ret, f)
		}
	}
	return ret
}
|
||||
// getenvDefault returns the value of the environment variable key, or
// defaultValue when the variable is not set. An empty-but-set variable
// yields the empty string, not the default.
func getenvDefault(key, defaultValue string) string {
	v, ok := os.LookupEnv(key)
	if !ok {
		return defaultValue
	}
	return v
}
|
||||
@@ -1,91 +0,0 @@
|
||||
// Copyright 2021 The Bazel Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// From https://pkg.go.dev/golang.org/x/tools/go/packages#LoadMode
|
||||
type LoadMode int
|
||||
|
||||
// Only NeedExportsFile is needed in our case
|
||||
const (
|
||||
// NeedName adds Name and PkgPath.
|
||||
NeedName LoadMode = 1 << iota
|
||||
|
||||
// NeedFiles adds GoFiles and OtherFiles.
|
||||
NeedFiles
|
||||
|
||||
// NeedCompiledGoFiles adds CompiledGoFiles.
|
||||
NeedCompiledGoFiles
|
||||
|
||||
// NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain
|
||||
// "placeholder" Packages with only the ID set.
|
||||
NeedImports
|
||||
|
||||
// NeedDeps adds the fields requested by the LoadMode in the packages in Imports.
|
||||
NeedDeps
|
||||
|
||||
// NeedExportsFile adds ExportFile.
|
||||
NeedExportFile
|
||||
|
||||
// NeedTypes adds Types, Fset, and IllTyped.
|
||||
NeedTypes
|
||||
|
||||
// NeedSyntax adds Syntax.
|
||||
NeedSyntax
|
||||
|
||||
// NeedTypesInfo adds TypesInfo.
|
||||
NeedTypesInfo
|
||||
|
||||
// NeedTypesSizes adds TypesSizes.
|
||||
NeedTypesSizes
|
||||
|
||||
// typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
|
||||
// Modifies CompiledGoFiles and Types, and has no effect on its own.
|
||||
typecheckCgo
|
||||
|
||||
// NeedModule adds Module.
|
||||
NeedModule
|
||||
)
|
||||
|
||||
// Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
|
||||
const NeedExportsFile = NeedExportFile
|
||||
|
||||
// From https://github.com/golang/tools/blob/v0.1.0/go/packages/external.go#L32
|
||||
// Most fields are disabled since there is no need for them
|
||||
type DriverRequest struct {
|
||||
Mode LoadMode `json:"mode"`
|
||||
// Env specifies the environment the underlying build system should be run in.
|
||||
// Env []string `json:"env"`
|
||||
// BuildFlags are flags that should be passed to the underlying build system.
|
||||
// BuildFlags []string `json:"build_flags"`
|
||||
// Tests specifies whether the patterns should also return test packages.
|
||||
Tests bool `json:"tests"`
|
||||
// Overlay maps file paths (relative to the driver's working directory) to the byte contents
|
||||
// of overlay files.
|
||||
// Overlay map[string][]byte `json:"overlay"`
|
||||
}
|
||||
|
||||
func ReadDriverRequest(r io.Reader) (*DriverRequest, error) {
|
||||
req := &DriverRequest{}
|
||||
if err := json.NewDecoder(r).Decode(&req); err != nil {
|
||||
return nil, fmt.Errorf("unable to decode driver request: %w", err)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
@@ -1,217 +0,0 @@
|
||||
// Copyright 2021 The Bazel Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type ResolvePkgFunc func(importPath string) string
|
||||
|
||||
// Copy and pasted from golang.org/x/tools/go/packages
|
||||
type FlatPackagesError struct {
|
||||
Pos string // "file:line:col" or "file:line" or "" or "-"
|
||||
Msg string
|
||||
Kind FlatPackagesErrorKind
|
||||
}
|
||||
|
||||
type FlatPackagesErrorKind int
|
||||
|
||||
const (
|
||||
UnknownError FlatPackagesErrorKind = iota
|
||||
ListError
|
||||
ParseError
|
||||
TypeError
|
||||
)
|
||||
|
||||
func (err FlatPackagesError) Error() string {
|
||||
pos := err.Pos
|
||||
if pos == "" {
|
||||
pos = "-" // like token.Position{}.String()
|
||||
}
|
||||
return pos + ": " + err.Msg
|
||||
}
|
||||
|
||||
// FlatPackage is the JSON form of Package
|
||||
// It drops all the type and syntax fields, and transforms the Imports
|
||||
type FlatPackage struct {
|
||||
ID string
|
||||
Name string `json:",omitempty"`
|
||||
PkgPath string `json:",omitempty"`
|
||||
Errors []FlatPackagesError `json:",omitempty"`
|
||||
GoFiles []string `json:",omitempty"`
|
||||
CompiledGoFiles []string `json:",omitempty"`
|
||||
OtherFiles []string `json:",omitempty"`
|
||||
ExportFile string `json:",omitempty"`
|
||||
Imports map[string]string `json:",omitempty"`
|
||||
Standard bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
type (
|
||||
PackageFunc func(pkg *FlatPackage)
|
||||
PathResolverFunc func(path string) string
|
||||
)
|
||||
|
||||
func resolvePathsInPlace(prf PathResolverFunc, paths []string) {
|
||||
for i, path := range paths {
|
||||
paths[i] = prf(path)
|
||||
}
|
||||
}
|
||||
|
||||
func WalkFlatPackagesFromJSON(jsonFile string, onPkg PackageFunc) error {
|
||||
f, err := os.Open(jsonFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to open package JSON file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := f.Close(); err != nil {
|
||||
log.WithError(err).WithField("file", f.Name()).Error("unable to close file")
|
||||
}
|
||||
}()
|
||||
|
||||
decoder := json.NewDecoder(f)
|
||||
for decoder.More() {
|
||||
pkg := &FlatPackage{}
|
||||
if err := decoder.Decode(&pkg); err != nil {
|
||||
return fmt.Errorf("unable to decode package in %s: %w", f.Name(), err)
|
||||
}
|
||||
|
||||
onPkg(pkg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fp *FlatPackage) ResolvePaths(prf PathResolverFunc) {
|
||||
resolvePathsInPlace(prf, fp.CompiledGoFiles)
|
||||
resolvePathsInPlace(prf, fp.GoFiles)
|
||||
resolvePathsInPlace(prf, fp.OtherFiles)
|
||||
fp.ExportFile = prf(fp.ExportFile)
|
||||
}
|
||||
|
||||
// FilterFilesForBuildTags filters the source files given the current build
|
||||
// tags.
|
||||
func (fp *FlatPackage) FilterFilesForBuildTags() {
|
||||
fp.GoFiles = filterSourceFilesForTags(fp.GoFiles)
|
||||
fp.CompiledGoFiles = filterSourceFilesForTags(fp.CompiledGoFiles)
|
||||
}
|
||||
|
||||
func (fp *FlatPackage) filterTestSuffix(files []string) (err error, testFiles []string, xTestFiles, nonTestFiles []string) {
|
||||
for _, filename := range files {
|
||||
if strings.HasSuffix(filename, "_test.go") {
|
||||
fset := token.NewFileSet()
|
||||
f, err := parser.ParseFile(fset, filename, nil, parser.PackageClauseOnly)
|
||||
if err != nil {
|
||||
return err, nil, nil, nil
|
||||
}
|
||||
if f.Name.Name == fp.Name {
|
||||
testFiles = append(testFiles, filename)
|
||||
} else {
|
||||
xTestFiles = append(xTestFiles, filename)
|
||||
}
|
||||
} else {
|
||||
nonTestFiles = append(nonTestFiles, filename)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (fp *FlatPackage) MoveTestFiles() *FlatPackage {
|
||||
err, tgf, xtgf, gf := fp.filterTestSuffix(fp.GoFiles)
|
||||
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
fp.GoFiles = append(gf, tgf...)
|
||||
fp.CompiledGoFiles = append(gf, tgf...)
|
||||
|
||||
if len(xtgf) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
newImports := make(map[string]string, len(fp.Imports))
|
||||
for k, v := range fp.Imports {
|
||||
newImports[k] = v
|
||||
}
|
||||
|
||||
newImports[fp.PkgPath] = fp.ID
|
||||
|
||||
// Clone package, only xtgf files
|
||||
return &FlatPackage{
|
||||
ID: fp.ID + "_xtest",
|
||||
Name: fp.Name + "_test",
|
||||
PkgPath: fp.PkgPath + "_test",
|
||||
Imports: newImports,
|
||||
Errors: fp.Errors,
|
||||
GoFiles: append([]string{}, xtgf...),
|
||||
CompiledGoFiles: append([]string{}, xtgf...),
|
||||
OtherFiles: fp.OtherFiles,
|
||||
ExportFile: fp.ExportFile,
|
||||
Standard: fp.Standard,
|
||||
}
|
||||
}
|
||||
|
||||
func (fp *FlatPackage) IsStdlib() bool {
|
||||
return fp.Standard
|
||||
}
|
||||
|
||||
func (fp *FlatPackage) ResolveImports(resolve ResolvePkgFunc) error {
|
||||
// Stdlib packages are already complete import wise
|
||||
if fp.IsStdlib() {
|
||||
return nil
|
||||
}
|
||||
|
||||
fset := token.NewFileSet()
|
||||
|
||||
for _, file := range fp.CompiledGoFiles {
|
||||
f, err := parser.ParseFile(fset, file, nil, parser.ImportsOnly)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the name is not provided, fetch it from the sources
|
||||
if fp.Name == "" {
|
||||
fp.Name = f.Name.Name
|
||||
}
|
||||
|
||||
for _, rawImport := range f.Imports {
|
||||
imp, err := strconv.Unquote(rawImport.Path.Value)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// We don't handle CGo for now
|
||||
if imp == "C" {
|
||||
continue
|
||||
}
|
||||
if _, ok := fp.Imports[imp]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if pkgID := resolve(imp); pkgID != "" {
|
||||
fp.Imports[imp] = pkgID
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fp *FlatPackage) IsRoot() bool {
|
||||
return strings.HasPrefix(fp.ID, "//")
|
||||
}
|
||||
@@ -1,56 +0,0 @@
|
||||
package driver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
ENV_JSON_INDEX_PATH = "PACKAGE_JSON_INVENTORY"
|
||||
ENV_PACKAGES_BASE = "PACKAGES_BASE"
|
||||
)
|
||||
|
||||
var ErrUnsetEnvVar = errors.New("required env var not set")
|
||||
|
||||
// LoadJsonListing reads the list of json package index files created by the bazel gopackagesdriver aspect:
|
||||
// https://github.com/bazelbuild/rules_go/blob/master/go/tools/gopackagesdriver/aspect.bzl
|
||||
// This list is serialized as a []string paths, relative to the bazel exec root.
|
||||
func LoadJsonListing() ([]string, error) {
|
||||
path, err := JsonIndexPathFromEnv()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ReadJsonIndex(path)
|
||||
}
|
||||
|
||||
func ReadJsonIndex(path string) ([]string, error) {
|
||||
um := make([]string, 0)
|
||||
b, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := json.Unmarshal(b, &um); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return um, nil
|
||||
}
|
||||
|
||||
// JsonIndexPathFromEnv reads the path to the json index file from the environment.
|
||||
func JsonIndexPathFromEnv() (string, error) {
|
||||
p := os.Getenv(ENV_JSON_INDEX_PATH)
|
||||
if p == "" {
|
||||
return "", errors.Wrap(ErrUnsetEnvVar, ENV_JSON_INDEX_PATH)
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func PackagesBaseFromEnv() (string, error) {
|
||||
p := os.Getenv(ENV_PACKAGES_BASE)
|
||||
if p == "" {
|
||||
return "", errors.Wrap(ErrUnsetEnvVar, ENV_PACKAGES_BASE)
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
package driver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestJsonList(t *testing.T) {
|
||||
path := "testdata/json-list.json"
|
||||
files, err := ReadJsonIndex(path)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(files))
|
||||
}
|
||||
|
||||
func TestJsonIndexPathFromEnv(t *testing.T) {
|
||||
cases := []struct {
|
||||
val string
|
||||
err error
|
||||
envname string
|
||||
getter func() (string, error)
|
||||
}{
|
||||
{
|
||||
getter: JsonIndexPathFromEnv,
|
||||
err: ErrUnsetEnvVar,
|
||||
},
|
||||
{
|
||||
getter: JsonIndexPathFromEnv,
|
||||
envname: ENV_JSON_INDEX_PATH,
|
||||
val: "/path/to/file",
|
||||
},
|
||||
{
|
||||
getter: PackagesBaseFromEnv,
|
||||
err: ErrUnsetEnvVar,
|
||||
},
|
||||
{
|
||||
getter: PackagesBaseFromEnv,
|
||||
envname: ENV_PACKAGES_BASE,
|
||||
val: "/path/to/base",
|
||||
},
|
||||
}
|
||||
|
||||
for i, c := range cases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
if c.envname != "" {
|
||||
t.Setenv(c.envname, c.val)
|
||||
}
|
||||
v, err := c.getter()
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, c.val, v)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
// Copyright 2021 The Bazel Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
type JSONPackagesDriver struct {
|
||||
registry *PackageRegistry
|
||||
}
|
||||
|
||||
func NewJSONPackagesDriver(jsonFiles []string, prf PathResolverFunc) (*JSONPackagesDriver, error) {
|
||||
jpd := &JSONPackagesDriver{
|
||||
registry: NewPackageRegistry(),
|
||||
}
|
||||
|
||||
for _, f := range jsonFiles {
|
||||
if err := WalkFlatPackagesFromJSON(f, func(pkg *FlatPackage) {
|
||||
jpd.registry.Add(pkg)
|
||||
}); err != nil {
|
||||
return nil, fmt.Errorf("unable to walk json: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := jpd.registry.ResolvePaths(prf); err != nil {
|
||||
return nil, fmt.Errorf("unable to resolve paths: %w", err)
|
||||
}
|
||||
|
||||
if err := jpd.registry.ResolveImports(); err != nil {
|
||||
return nil, fmt.Errorf("unable to resolve paths: %w", err)
|
||||
}
|
||||
|
||||
return jpd, nil
|
||||
}
|
||||
|
||||
func (b *JSONPackagesDriver) Handle(req *DriverRequest, queries []string) *driverResponse {
|
||||
r, p := b.registry.Query(req, queries)
|
||||
return &driverResponse{
|
||||
NotHandled: false,
|
||||
Compiler: "gc",
|
||||
Arch: runtime.GOARCH,
|
||||
Roots: r,
|
||||
Packages: p,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *JSONPackagesDriver) GetResponse(labels []string) *driverResponse {
|
||||
rootPkgs, packages := b.registry.Match(labels)
|
||||
|
||||
return &driverResponse{
|
||||
NotHandled: false,
|
||||
Compiler: "gc",
|
||||
Arch: runtime.GOARCH,
|
||||
Roots: rootPkgs,
|
||||
Packages: packages,
|
||||
}
|
||||
}
|
||||
|
||||
type driverResponse struct {
|
||||
// NotHandled is returned if the request can't be handled by the current
|
||||
// driver. If an external driver returns a response with NotHandled, the
|
||||
// rest of the driverResponse is ignored, and go/packages will fallback
|
||||
// to the next driver. If go/packages is extended in the future to support
|
||||
// lists of multiple drivers, go/packages will fall back to the next driver.
|
||||
NotHandled bool
|
||||
|
||||
// Compiler and Arch are the arguments pass of types.SizesFor
|
||||
// to get a types.Sizes to use when type checking.
|
||||
Compiler string
|
||||
Arch string
|
||||
|
||||
// Roots is the set of package IDs that make up the root packages.
|
||||
// We have to encode this separately because when we encode a single package
|
||||
// we cannot know if it is one of the roots as that requires knowledge of the
|
||||
// graph it is part of.
|
||||
Roots []string `json:",omitempty"`
|
||||
|
||||
// Packages is the full set of packages in the graph.
|
||||
// The packages are not connected into a graph.
|
||||
// The Imports if populated will be stubs that only have their ID set.
|
||||
// Imports will be connected and then type and syntax information added in a
|
||||
// later pass (see refine).
|
||||
Packages []*FlatPackage
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
package driver
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Create a new instance of the logger. You can have any number of instances.
|
||||
var log = logrus.New()
|
||||
var Logger *logrus.Logger
|
||||
|
||||
func init() {
|
||||
path := os.Getenv("GOPACKAGESDRIVER_LOG_PATH")
|
||||
if path == "" {
|
||||
path = filepath.Join(os.Getenv("PWD"), "genception.log")
|
||||
}
|
||||
file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
|
||||
if err == nil {
|
||||
log.Out = file
|
||||
} else {
|
||||
log.Info("Failed to log to file, using default stderr")
|
||||
}
|
||||
Logger = log
|
||||
}
|
||||
@@ -1,193 +0,0 @@
|
||||
// Copyright 2021 The Bazel Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type PackageRegistry struct {
|
||||
packages map[string]*FlatPackage
|
||||
stdlib map[string]string
|
||||
}
|
||||
|
||||
func NewPackageRegistry(pkgs ...*FlatPackage) *PackageRegistry {
|
||||
pr := &PackageRegistry{
|
||||
packages: map[string]*FlatPackage{},
|
||||
stdlib: map[string]string{},
|
||||
}
|
||||
pr.Add(pkgs...)
|
||||
return pr
|
||||
}
|
||||
|
||||
func rewritePackage(pkg *FlatPackage) {
|
||||
pkg.ID = pkg.PkgPath
|
||||
for k := range pkg.Imports {
|
||||
// rewrite package ID mapping to be the same as the path
|
||||
pkg.Imports[k] = k
|
||||
}
|
||||
}
|
||||
|
||||
// returns true if a is a superset of b
|
||||
func isSuperset(a, b []string) bool {
|
||||
if len(a) < len(b) {
|
||||
return false
|
||||
}
|
||||
bi := 0
|
||||
for i := range a {
|
||||
if a[i] == b[bi] {
|
||||
bi++
|
||||
if bi == len(b) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Update merges the contents of 2 packages together in the instance where they have the same package path.
|
||||
// This can happen when the gopackages aspect traverses to a child label and generates separate json files transitive targets.
|
||||
// For example, in //proto/prysm/v1alpha1 we see both `:go_default_library` and `:go_proto` from `//proto/engine/v1`.
|
||||
// Without the merge, `:go_proto` can overwrite `:go_default_library`, leaving sources files out of the final graph.
|
||||
func (pr *PackageRegistry) Update(pkg *FlatPackage) {
|
||||
existing, ok := pr.packages[pkg.PkgPath]
|
||||
if !ok {
|
||||
pr.packages[pkg.PkgPath] = pkg
|
||||
return
|
||||
}
|
||||
if isSuperset(pkg.GoFiles, existing.GoFiles) {
|
||||
existing.GoFiles = pkg.GoFiles
|
||||
}
|
||||
}
|
||||
|
||||
func (pr *PackageRegistry) Add(pkgs ...*FlatPackage) *PackageRegistry {
|
||||
for _, pkg := range pkgs {
|
||||
rewritePackage(pkg)
|
||||
pr.packages[pkg.PkgPath] = pkg
|
||||
|
||||
if pkg.IsStdlib() {
|
||||
pr.stdlib[pkg.PkgPath] = pkg.ID
|
||||
}
|
||||
}
|
||||
return pr
|
||||
}
|
||||
|
||||
func (pr *PackageRegistry) ResolvePaths(prf PathResolverFunc) error {
|
||||
for _, pkg := range pr.packages {
|
||||
pkg.ResolvePaths(prf)
|
||||
pkg.FilterFilesForBuildTags()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResolveImports adds stdlib imports to packages. This is required because
|
||||
// stdlib packages are not part of the JSON file exports as bazel is unaware of
|
||||
// them.
|
||||
func (pr *PackageRegistry) ResolveImports() error {
|
||||
resolve := func(importPath string) string {
|
||||
if pkgID, ok := pr.stdlib[importPath]; ok {
|
||||
return pkgID
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
for _, pkg := range pr.packages {
|
||||
if err := pkg.ResolveImports(resolve); err != nil {
|
||||
return err
|
||||
}
|
||||
testFp := pkg.MoveTestFiles()
|
||||
if testFp != nil {
|
||||
pr.packages[testFp.ID] = testFp
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pr *PackageRegistry) walk(acc map[string]*FlatPackage, root string) {
|
||||
pkg := pr.packages[root]
|
||||
|
||||
if pkg == nil {
|
||||
log.WithField("root", root).Error("package ID not found")
|
||||
return
|
||||
}
|
||||
|
||||
acc[pkg.ID] = pkg
|
||||
for _, pkgID := range pkg.Imports {
|
||||
if _, ok := acc[pkgID]; !ok {
|
||||
pr.walk(acc, pkgID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pr *PackageRegistry) Query(req *DriverRequest, queries []string) ([]string, []*FlatPackage) {
|
||||
walkedPackages := map[string]*FlatPackage{}
|
||||
retRoots := make([]string, 0, len(queries))
|
||||
for _, rootPkg := range queries {
|
||||
retRoots = append(retRoots, rootPkg)
|
||||
pr.walk(walkedPackages, rootPkg)
|
||||
}
|
||||
|
||||
retPkgs := make([]*FlatPackage, 0, len(walkedPackages))
|
||||
for _, pkg := range walkedPackages {
|
||||
retPkgs = append(retPkgs, pkg)
|
||||
}
|
||||
|
||||
return retRoots, retPkgs
|
||||
}
|
||||
|
||||
func (pr *PackageRegistry) Match(labels []string) ([]string, []*FlatPackage) {
|
||||
roots := map[string]struct{}{}
|
||||
|
||||
for _, label := range labels {
|
||||
// When packagesdriver is ran from rules go, rulesGoRepositoryName will just be @
|
||||
if !strings.HasPrefix(label, "@") {
|
||||
// Canonical labels is only since Bazel 6.0.0
|
||||
label = fmt.Sprintf("@%s", label)
|
||||
}
|
||||
|
||||
if label == RulesGoStdlibLabel {
|
||||
// For stdlib, we need to append all the subpackages as roots
|
||||
// since RulesGoStdLibLabel doesn't actually show up in the stdlib pkg.json
|
||||
for _, pkg := range pr.packages {
|
||||
if pkg.Standard {
|
||||
roots[pkg.ID] = struct{}{}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
roots[label] = struct{}{}
|
||||
// If an xtest package exists for this package add it to the roots
|
||||
if _, ok := pr.packages[label+"_xtest"]; ok {
|
||||
roots[label+"_xtest"] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
walkedPackages := map[string]*FlatPackage{}
|
||||
retRoots := make([]string, 0, len(roots))
|
||||
for rootPkg := range roots {
|
||||
retRoots = append(retRoots, rootPkg)
|
||||
pr.walk(walkedPackages, rootPkg)
|
||||
}
|
||||
|
||||
retPkgs := make([]*FlatPackage, 0, len(walkedPackages))
|
||||
for _, pkg := range walkedPackages {
|
||||
retPkgs = append(retPkgs, pkg)
|
||||
}
|
||||
|
||||
return retRoots, retPkgs
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
package driver
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsSuperset(t *testing.T) {
|
||||
cases := []struct {
|
||||
a []string
|
||||
b []string
|
||||
expected bool
|
||||
}{
|
||||
{[]string{"a", "b", "c", "d"}, []string{"a", "b"}, true},
|
||||
{[]string{"a", "b", "c", "d"}, []string{"a", "b", "c", "d"}, true},
|
||||
{[]string{"a", "b", "c", "d"}, []string{"a", "b", "c", "d", "e"}, false},
|
||||
{[]string{"a", "b", "c", "d"}, []string{"a", "b", "c"}, true},
|
||||
{[]string{}, []string{"a"}, false},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(strings.Join(c.a, "_")+"__"+strings.Join(c.b, "_"), func(t *testing.T) {
|
||||
if isSuperset(c.a, c.b) != c.expected {
|
||||
t.Errorf("isSuperset(%v, %v) != %v", c.a, c.b, c.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
package driver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Recorder struct {
|
||||
base string
|
||||
t time.Time
|
||||
}
|
||||
|
||||
func NewRecorder() (*Recorder, error) {
|
||||
base := os.Getenv("PWD")
|
||||
r := &Recorder{base: base, t: time.Now()}
|
||||
if err := r.Mkdir(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
func (r *Recorder) Dir() string {
|
||||
return path.Join(r.base, strconv.FormatInt(r.t.UTC().UnixNano(), 10))
|
||||
}
|
||||
|
||||
func (r *Recorder) Mkdir() error {
|
||||
return os.MkdirAll(r.Dir(), 0755)
|
||||
}
|
||||
|
||||
func (r *Recorder) RecordRequest(args []string, req *DriverRequest) error {
|
||||
b, err := json.Marshal(struct {
|
||||
Args []string
|
||||
Request *DriverRequest
|
||||
}{
|
||||
Args: args,
|
||||
Request: req,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(path.Join(r.Dir(), "request.json"), b, 0644)
|
||||
}
|
||||
|
||||
func (r *Recorder) RecordResponse(resp *driverResponse) error {
|
||||
b, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(path.Join(r.Dir(), "response.json"), b, 0644)
|
||||
}
|
||||
@@ -1,6 +0,0 @@
|
||||
[
|
||||
"bazel-out/darwin_arm64-fastbuild/bin/external/io_bazel_rules_go/stdlib_/stdlib.pkg.json",
|
||||
"bazel-out/darwin_arm64-fastbuild/bin/external/com_github_thomaso_mirodin_intmath/constants/c64/c64.pkg.json",
|
||||
"bazel-out/darwin_arm64-fastbuild/bin/external/com_github_thomaso_mirodin_intmath/u64/u64.pkg.json",
|
||||
"bazel-out/darwin_arm64-fastbuild/bin/proto/prysm/v1alpha1/go_proto.pkg.json"
|
||||
]
|
||||
@@ -1,127 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "GoLibrary", "GoSource", "go_context")
|
||||
load("@io_bazel_rules_go//go/tools/gopackagesdriver:aspect.bzl", "go_pkg_info_aspect", "GoPkgInfo")
|
||||
|
||||
_METHODICAL_TOOL = Label("//tools/genception:methodicalgen")
|
||||
_GENCEPTION_TOOL = Label("//tools/genception/cmd:cmd")
|
||||
_FASTSSZ_DEP = Label("@com_github_prysmaticlabs_fastssz//:go_default_library")
|
||||
|
||||
def _ssz_methodical_impl(ctx):
|
||||
go_ctx = go_context(ctx)
|
||||
all_json_files = {}
|
||||
stdlib = ''
|
||||
inputs = []
|
||||
#inputs += go_ctx.sdk.srcs
|
||||
#inputs += go_ctx.sdk.headers + go_ctx.sdk.srcs + go_ctx.sdk.tools
|
||||
ssz_sources = go_ctx.library_to_source(go_ctx, ctx.attr, ctx.attr.fastssz_lib[GoLibrary], ctx.coverage_instrumented())
|
||||
inputs += ssz_sources.srcs
|
||||
#sample = go_ctx.sdk.srcs[0].path
|
||||
for dep in ctx.attr.deps + [ctx.attr.fastssz_lib]:
|
||||
pkginfo = dep[OutputGroupInfo]
|
||||
if hasattr(pkginfo, "go_generated_srcs"):
|
||||
inputs += pkginfo.go_generated_srcs.to_list()
|
||||
# collect all the paths to json files dict keys for uniqueness
|
||||
json_files = pkginfo.go_pkg_driver_json_file.to_list()
|
||||
inputs += json_files
|
||||
if len(json_files) > 0:
|
||||
for jf in json_files:
|
||||
# presumably path is full path from exec root
|
||||
all_json_files[jf.path] = ""
|
||||
inputs += pkginfo.go_pkg_driver_srcs.to_list()
|
||||
inputs += pkginfo.go_pkg_driver_export_file.to_list()
|
||||
# we just ned to get the stdlib once
|
||||
#if stdlib == '' and hasattr(pkginfo, "go_pkg_driver_stdlib_json_file"):
|
||||
if stdlib == '':
|
||||
std_ds = pkginfo.go_pkg_driver_stdlib_json_file.to_list()
|
||||
if len(std_ds) > 0:
|
||||
stdlib = std_ds[0].path
|
||||
inputs += std_ds
|
||||
# concat the stdlib with all the other json file paths and write to disk
|
||||
json_out = [stdlib] + all_json_files.keys()
|
||||
all_pkg_list = ctx.actions.declare_file("methodical-pkg-list.json")
|
||||
ctx.actions.write(all_pkg_list, content = json.encode(json_out))
|
||||
#echo "sample = {sample}" &&
|
||||
#echo "{out_base}" &&
|
||||
out_base = ctx.outputs.out.root.path
|
||||
|
||||
args = [
|
||||
"gen",
|
||||
"--type-names=" + ",".join(ctx.attr.type_names),
|
||||
"--output=" + ctx.outputs.out.path,
|
||||
]
|
||||
if ctx.attr.target_package_name != "":
|
||||
args.append("--override-package-name=" + ctx.attr.target_package_name)
|
||||
|
||||
# Positional arg, needs to be after other --flags.
|
||||
args.append(ctx.attr.target_package)
|
||||
|
||||
codegen_bins = [ctx.file.genception, ctx.file.methodical_tool]
|
||||
ctx.actions.run_shell(
|
||||
env = {
|
||||
"PACKAGE_JSON_INVENTORY": all_pkg_list.path,
|
||||
"PACKAGES_BASE": out_base,
|
||||
# GOCACHE is required starting in Go 1.12
|
||||
"GOCACHE": "./.gocache",
|
||||
"GOPACKAGESDRIVER": ctx.file.genception.path,
|
||||
"GOPACKAGESDRIVER_LOG_PATH": out_base + "/gopackagesdriver.log",
|
||||
},
|
||||
|
||||
inputs = [all_pkg_list] + inputs + codegen_bins,
|
||||
outputs = [ctx.outputs.out],
|
||||
command = """
|
||||
echo $PACKAGE_JSON_INVENTORY &&
|
||||
echo $PACKAGES_BASE &&
|
||||
echo $PWD &&
|
||||
{cmd} {args}
|
||||
""".format(
|
||||
#sample = sample,
|
||||
out_base = out_base,
|
||||
json_list = all_pkg_list.path,
|
||||
cmd = "$(pwd)/" + ctx.file.methodical_tool.path,
|
||||
args = " ".join(args),
|
||||
out = ctx.outputs.out.path,
|
||||
),
|
||||
)
|
||||
|
||||
ssz_methodical = rule(
|
||||
implementation = _ssz_methodical_impl,
|
||||
attrs = {
|
||||
"type_names": attr.string_list(
|
||||
allow_empty = False,
|
||||
doc = "The names of the Go types to generate methods for.",
|
||||
mandatory = True,
|
||||
),
|
||||
'deps' : attr.label_list(aspects = [go_pkg_info_aspect]),
|
||||
"out": attr.output(
|
||||
doc = "The new Go file to emit the generated mocks into",
|
||||
),
|
||||
"_go_context_data": attr.label(
|
||||
default = "@io_bazel_rules_go//:go_context_data",
|
||||
),
|
||||
"methodical_tool": attr.label(
|
||||
doc = "The methodical tool (binary) to run",
|
||||
default = _METHODICAL_TOOL,
|
||||
allow_single_file = True,
|
||||
executable = True,
|
||||
cfg = "exec",
|
||||
mandatory = False,
|
||||
),
|
||||
"fastssz_lib": attr.label(providers = [GoLibrary], default = _FASTSSZ_DEP, aspects = [go_pkg_info_aspect]),
|
||||
"target_package": attr.string(
|
||||
doc = "The package path containing the types in type_names.",
|
||||
mandatory = True,
|
||||
),
|
||||
"target_package_name": attr.string(
|
||||
doc = "Override the name of the package the generated file is in (eg 'eth' for proto/prysm/v1alpha1)",
|
||||
mandatory = False,
|
||||
),
|
||||
"genception": attr.label(
|
||||
doc = "gopackagesdriver tool for package discovery inside bazel sandbox",
|
||||
default = _GENCEPTION_TOOL,
|
||||
allow_single_file = True,
|
||||
executable = True,
|
||||
cfg = "exec",
|
||||
mandatory = False,
|
||||
),
|
||||
},
|
||||
toolchains = ["@io_bazel_rules_go//go:toolchain"],
|
||||
)
|
||||
@@ -60,9 +60,6 @@ def _ssz_go_proto_library_impl(ctx):
|
||||
if len(ctx.attr.objs) > 0:
|
||||
args.append("--objs=%s" % ",".join(ctx.attr.objs))
|
||||
|
||||
if len(ctx.attr.exclude_objs) > 0:
|
||||
args.append("--exclude-objs=%s" % ",".join(ctx.attr.exclude_objs))
|
||||
|
||||
ctx.actions.run(
|
||||
executable = ctx.executable.sszgen,
|
||||
progress_message = "Generating ssz marshal and unmarshal functions",
|
||||
@@ -82,10 +79,9 @@ ssz_gen_marshal = rule(
|
||||
cfg = "exec",
|
||||
),
|
||||
"objs": attr.string_list(),
|
||||
"exclude_objs": attr.string_list(),
|
||||
"includes": attr.label_list(providers = [GoLibrary]),
|
||||
"out": attr.output(),
|
||||
},
|
||||
outputs = {"out": "generated.ssz.go"},
|
||||
)
|
||||
|
||||
SSZ_DEPS = ["@com_github_prysmaticlabs_fastssz//:go_default_library"]
|
||||
|
||||
@@ -138,9 +138,6 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot primitives
|
||||
|
||||
// Signs input slot with domain selection proof. This is used to create the signature for aggregator selection.
|
||||
func (v *validator) signSlotWithSelectionProof(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, slot primitives.Slot) (signature []byte, err error) {
|
||||
ctx, span := trace.StartSpan(ctx, "validator.signSlotWithSelectionProof")
|
||||
defer span.End()
|
||||
|
||||
domain, err := v.domainData(ctx, slots.ToEpoch(slot), params.BeaconConfig().DomainSelectionProof[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -197,9 +194,6 @@ func (v *validator) waitToSlotTwoThirds(ctx context.Context, slot primitives.Slo
|
||||
// This returns the signature of validator signing over aggregate and
|
||||
// proof object.
|
||||
func (v *validator) aggregateAndProofSig(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, agg *ethpb.AggregateAttestationAndProof, slot primitives.Slot) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "validator.aggregateAndProofSig")
|
||||
defer span.End()
|
||||
|
||||
d, err := v.domainData(ctx, slots.ToEpoch(agg.Aggregate.Data.Slot), params.BeaconConfig().DomainAggregateAndProof[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -199,9 +199,6 @@ func (v *validator) duty(pubKey [fieldparams.BLSPubkeyLength]byte) (*ethpb.Dutie
|
||||
|
||||
// Given validator's public key, this function returns the signature of an attestation data and its signing root.
|
||||
func (v *validator) signAtt(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, data *ethpb.AttestationData, slot primitives.Slot) ([]byte, [32]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "validator.signAtt")
|
||||
defer span.End()
|
||||
|
||||
domain, root, err := v.domainAndSigningRoot(ctx, data)
|
||||
if err != nil {
|
||||
return nil, [32]byte{}, err
|
||||
|
||||
@@ -64,7 +64,6 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
|
||||
@@ -116,7 +116,7 @@ func TestActivation_Nominal(t *testing.T) {
|
||||
|
||||
// Get does not return any result for non existing key
|
||||
jsonRestHandler.EXPECT().Post(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
"/eth/v1/beacon/states/head/validators",
|
||||
nil,
|
||||
bytes.NewBuffer(reqBytes),
|
||||
@@ -240,7 +240,7 @@ func TestActivation_InvalidData(t *testing.T) {
|
||||
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().Post(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
@@ -281,7 +281,7 @@ func TestActivation_JsonResponseError(t *testing.T) {
|
||||
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().Post(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
@@ -291,7 +291,7 @@ func TestActivation_JsonResponseError(t *testing.T) {
|
||||
).Times(1)
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
|
||||
@@ -33,7 +33,7 @@ func TestGetAttestationData_ValidAttestation(t *testing.T) {
|
||||
produceAttestationDataResponseJson := structs.GetAttestationDataResponse{}
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
fmt.Sprintf("/eth/v1/validator/attestation_data?committee_index=%d&slot=%d", expectedCommitteeIndex, expectedSlot),
|
||||
&produceAttestationDataResponseJson,
|
||||
).Return(
|
||||
@@ -183,7 +183,7 @@ func TestGetAttestationData_InvalidData(t *testing.T) {
|
||||
produceAttestationDataResponseJson := structs.GetAttestationDataResponse{}
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
"/eth/v1/validator/attestation_data?committee_index=2&slot=1",
|
||||
&produceAttestationDataResponseJson,
|
||||
).Return(
|
||||
@@ -212,7 +212,7 @@ func TestGetAttestationData_JsonResponseError(t *testing.T) {
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
produceAttestationDataResponseJson := structs.GetAttestationDataResponse{}
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
fmt.Sprintf("/eth/v1/validator/attestation_data?committee_index=%d&slot=%d", committeeIndex, slot),
|
||||
&produceAttestationDataResponseJson,
|
||||
).Return(
|
||||
|
||||
@@ -58,7 +58,7 @@ func TestListValidators(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
|
||||
nil,
|
||||
errors.New("foo error"),
|
||||
)
|
||||
@@ -78,7 +78,7 @@ func TestListValidators(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
|
||||
nil,
|
||||
errors.New("bar error"),
|
||||
)
|
||||
@@ -96,7 +96,7 @@ func TestListValidators(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForHead(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForHead(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(
|
||||
nil,
|
||||
errors.New("foo error"),
|
||||
)
|
||||
@@ -114,13 +114,13 @@ func TestListValidators(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForHead(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForHead(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().Get(gomock.Any(), blockHeaderEndpoint, gomock.Any()).Return(errors.New("bar error"))
|
||||
jsonRestHandler.EXPECT().Get(ctx, blockHeaderEndpoint, gomock.Any()).Return(errors.New("bar error"))
|
||||
|
||||
beaconChainClient := beaconApiChainClient{
|
||||
stateValidatorsProvider: stateValidatorsProvider,
|
||||
@@ -187,13 +187,13 @@ func TestListValidators(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForHead(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForHead(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().Get(gomock.Any(), blockHeaderEndpoint, gomock.Any()).Return(
|
||||
jsonRestHandler.EXPECT().Get(ctx, blockHeaderEndpoint, gomock.Any()).Return(
|
||||
nil,
|
||||
).SetArg(
|
||||
2,
|
||||
@@ -328,7 +328,7 @@ func TestListValidators(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
|
||||
testCase.generateStateValidatorsResponse(),
|
||||
nil,
|
||||
)
|
||||
@@ -556,7 +556,7 @@ func TestListValidators(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(gomock.Any(), primitives.Slot(0), make([]string, 0), []primitives.ValidatorIndex{}, nil).Return(
|
||||
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(ctx, primitives.Slot(0), make([]string, 0), []primitives.ValidatorIndex{}, nil).Return(
|
||||
testCase.generateJsonStateValidatorsResponse(),
|
||||
nil,
|
||||
)
|
||||
@@ -745,7 +745,7 @@ func TestGetChainHead(t *testing.T) {
|
||||
|
||||
finalityCheckpointsResponse := structs.GetFinalityCheckpointsResponse{}
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().Get(gomock.Any(), finalityCheckpointsEndpoint, &finalityCheckpointsResponse).Return(
|
||||
jsonRestHandler.EXPECT().Get(ctx, finalityCheckpointsEndpoint, &finalityCheckpointsResponse).Return(
|
||||
testCase.finalityCheckpointsError,
|
||||
).SetArg(
|
||||
2,
|
||||
@@ -844,7 +844,7 @@ func TestGetChainHead(t *testing.T) {
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
|
||||
finalityCheckpointsResponse := structs.GetFinalityCheckpointsResponse{}
|
||||
jsonRestHandler.EXPECT().Get(gomock.Any(), finalityCheckpointsEndpoint, &finalityCheckpointsResponse).Return(
|
||||
jsonRestHandler.EXPECT().Get(ctx, finalityCheckpointsEndpoint, &finalityCheckpointsResponse).Return(
|
||||
nil,
|
||||
).SetArg(
|
||||
2,
|
||||
@@ -852,7 +852,7 @@ func TestGetChainHead(t *testing.T) {
|
||||
)
|
||||
|
||||
headBlockHeadersResponse := structs.GetBlockHeaderResponse{}
|
||||
jsonRestHandler.EXPECT().Get(gomock.Any(), headBlockHeadersEndpoint, &headBlockHeadersResponse).Return(
|
||||
jsonRestHandler.EXPECT().Get(ctx, headBlockHeadersEndpoint, &headBlockHeadersResponse).Return(
|
||||
testCase.headBlockHeadersError,
|
||||
).SetArg(
|
||||
2,
|
||||
@@ -874,7 +874,7 @@ func TestGetChainHead(t *testing.T) {
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
|
||||
finalityCheckpointsResponse := structs.GetFinalityCheckpointsResponse{}
|
||||
jsonRestHandler.EXPECT().Get(gomock.Any(), finalityCheckpointsEndpoint, &finalityCheckpointsResponse).Return(
|
||||
jsonRestHandler.EXPECT().Get(ctx, finalityCheckpointsEndpoint, &finalityCheckpointsResponse).Return(
|
||||
nil,
|
||||
).SetArg(
|
||||
2,
|
||||
@@ -882,7 +882,7 @@ func TestGetChainHead(t *testing.T) {
|
||||
)
|
||||
|
||||
headBlockHeadersResponse := structs.GetBlockHeaderResponse{}
|
||||
jsonRestHandler.EXPECT().Get(gomock.Any(), headBlockHeadersEndpoint, &headBlockHeadersResponse).Return(
|
||||
jsonRestHandler.EXPECT().Get(ctx, headBlockHeadersEndpoint, &headBlockHeadersResponse).Return(
|
||||
nil,
|
||||
).SetArg(
|
||||
2,
|
||||
@@ -940,7 +940,7 @@ func Test_beaconApiBeaconChainClient_GetValidatorPerformance(t *testing.T) {
|
||||
want := ðpb.ValidatorPerformanceResponse{}
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().Post(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
getValidatorPerformanceEndpoint,
|
||||
nil,
|
||||
bytes.NewBuffer(request),
|
||||
|
||||
@@ -109,7 +109,7 @@ func TestGetFork_Nominal(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
forkEndpoint,
|
||||
&stateForkResponseJson,
|
||||
).Return(
|
||||
@@ -137,7 +137,7 @@ func TestGetFork_Invalid(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
forkEndpoint,
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
@@ -176,7 +176,7 @@ func TestGetHeaders_Nominal(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
headersEndpoint,
|
||||
&blockHeadersResponseJson,
|
||||
).Return(
|
||||
@@ -204,7 +204,7 @@ func TestGetHeaders_Invalid(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
headersEndpoint,
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
@@ -248,7 +248,7 @@ func TestGetLiveness_Nominal(t *testing.T) {
|
||||
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().Post(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
livenessEndpoint,
|
||||
nil,
|
||||
bytes.NewBuffer(marshalledIndexes),
|
||||
@@ -275,7 +275,7 @@ func TestGetLiveness_Invalid(t *testing.T) {
|
||||
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().Post(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
livenessEndpoint,
|
||||
nil,
|
||||
gomock.Any(),
|
||||
@@ -324,7 +324,7 @@ func TestGetIsSyncing_Nominal(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
syncingEndpoint,
|
||||
&syncingResponseJson,
|
||||
).Return(
|
||||
@@ -355,7 +355,7 @@ func TestGetIsSyncing_Invalid(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
syncingEndpoint,
|
||||
&syncingResponseJson,
|
||||
).Return(
|
||||
|
||||
@@ -113,7 +113,7 @@ func TestGetGenesis(t *testing.T) {
|
||||
|
||||
genesisProvider := mock.NewMockGenesisProvider(ctrl)
|
||||
genesisProvider.EXPECT().Genesis(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
).Return(
|
||||
testCase.genesisResponse,
|
||||
testCase.genesisError,
|
||||
@@ -124,7 +124,7 @@ func TestGetGenesis(t *testing.T) {
|
||||
|
||||
if testCase.queriesDepositContract {
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
"/eth/v1/config/deposit_contract",
|
||||
&depositContractJson,
|
||||
).Return(
|
||||
@@ -203,7 +203,7 @@ func TestGetSyncStatus(t *testing.T) {
|
||||
syncingResponse := structs.SyncStatusResponse{}
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
syncingEndpoint,
|
||||
&syncingResponse,
|
||||
).Return(
|
||||
@@ -267,7 +267,7 @@ func TestGetVersion(t *testing.T) {
|
||||
var versionResponse structs.GetVersionResponse
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
versionEndpoint,
|
||||
&versionResponse,
|
||||
).Return(
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/validator/client/iface"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
type ValidatorClientOpt func(*beaconApiValidatorClient)
|
||||
@@ -48,16 +47,12 @@ func NewBeaconApiValidatorClient(jsonRestHandler JsonRestHandler, opts ...Valida
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) Duties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.DutiesResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.Duties")
|
||||
defer span.End()
|
||||
return wrapInMetrics[*ethpb.DutiesResponse]("Duties", func() (*ethpb.DutiesResponse, error) {
|
||||
return c.duties(ctx, in)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) CheckDoppelGanger(ctx context.Context, in *ethpb.DoppelGangerRequest) (*ethpb.DoppelGangerResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.CheckDoppelGanger")
|
||||
defer span.End()
|
||||
return wrapInMetrics[*ethpb.DoppelGangerResponse]("CheckDoppelGanger", func() (*ethpb.DoppelGangerResponse, error) {
|
||||
return c.checkDoppelGanger(ctx, in)
|
||||
})
|
||||
@@ -67,10 +62,6 @@ func (c *beaconApiValidatorClient) DomainData(ctx context.Context, in *ethpb.Dom
|
||||
if len(in.Domain) != 4 {
|
||||
return nil, errors.Errorf("invalid domain type: %s", hexutil.Encode(in.Domain))
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.DomainData")
|
||||
defer span.End()
|
||||
|
||||
domainType := bytesutil.ToBytes4(in.Domain)
|
||||
|
||||
return wrapInMetrics[*ethpb.DomainResponse]("DomainData", func() (*ethpb.DomainResponse, error) {
|
||||
@@ -79,18 +70,12 @@ func (c *beaconApiValidatorClient) DomainData(ctx context.Context, in *ethpb.Dom
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) AttestationData(ctx context.Context, in *ethpb.AttestationDataRequest) (*ethpb.AttestationData, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.AttestationData")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*ethpb.AttestationData]("AttestationData", func() (*ethpb.AttestationData, error) {
|
||||
return c.attestationData(ctx, in.Slot, in.CommitteeIndex)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) BeaconBlock(ctx context.Context, in *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.BeaconBlock")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*ethpb.GenericBeaconBlock]("BeaconBlock", func() (*ethpb.GenericBeaconBlock, error) {
|
||||
return c.beaconBlock(ctx, in.Slot, in.RandaoReveal, in.Graffiti)
|
||||
})
|
||||
@@ -101,72 +86,48 @@ func (c *beaconApiValidatorClient) FeeRecipientByPubKey(_ context.Context, _ *et
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) SyncCommitteeContribution(ctx context.Context, in *ethpb.SyncCommitteeContributionRequest) (*ethpb.SyncCommitteeContribution, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.SyncCommitteeContribution")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*ethpb.SyncCommitteeContribution]("SyncCommitteeContribution", func() (*ethpb.SyncCommitteeContribution, error) {
|
||||
return c.syncCommitteeContribution(ctx, in)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) SyncMessageBlockRoot(ctx context.Context, _ *empty.Empty) (*ethpb.SyncMessageBlockRootResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.SyncMessageBlockRoot")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*ethpb.SyncMessageBlockRootResponse]("SyncMessageBlockRoot", func() (*ethpb.SyncMessageBlockRootResponse, error) {
|
||||
return c.syncMessageBlockRoot(ctx)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) SyncSubcommitteeIndex(ctx context.Context, in *ethpb.SyncSubcommitteeIndexRequest) (*ethpb.SyncSubcommitteeIndexResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.SyncSubcommitteeIndex")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*ethpb.SyncSubcommitteeIndexResponse]("SyncSubcommitteeIndex", func() (*ethpb.SyncSubcommitteeIndexResponse, error) {
|
||||
return c.syncSubcommitteeIndex(ctx, in)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) MultipleValidatorStatus(ctx context.Context, in *ethpb.MultipleValidatorStatusRequest) (*ethpb.MultipleValidatorStatusResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.MultipleValidatorStatus")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*ethpb.MultipleValidatorStatusResponse]("MultipleValidatorStatus", func() (*ethpb.MultipleValidatorStatusResponse, error) {
|
||||
return c.multipleValidatorStatus(ctx, in)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) PrepareBeaconProposer(ctx context.Context, in *ethpb.PrepareBeaconProposerRequest) (*empty.Empty, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.PrepareBeaconProposer")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*empty.Empty]("PrepareBeaconProposer", func() (*empty.Empty, error) {
|
||||
return new(empty.Empty), c.prepareBeaconProposer(ctx, in.Recipients)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) ProposeAttestation(ctx context.Context, in *ethpb.Attestation) (*ethpb.AttestResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.ProposeAttestation")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*ethpb.AttestResponse]("ProposeAttestation", func() (*ethpb.AttestResponse, error) {
|
||||
return c.proposeAttestation(ctx, in)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) ProposeBeaconBlock(ctx context.Context, in *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.ProposeBeaconBlock")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*ethpb.ProposeResponse]("ProposeBeaconBlock", func() (*ethpb.ProposeResponse, error) {
|
||||
return c.proposeBeaconBlock(ctx, in)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) ProposeExit(ctx context.Context, in *ethpb.SignedVoluntaryExit) (*ethpb.ProposeExitResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.ProposeExit")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*ethpb.ProposeExitResponse]("ProposeExit", func() (*ethpb.ProposeExitResponse, error) {
|
||||
return c.proposeExit(ctx, in)
|
||||
})
|
||||
@@ -177,79 +138,52 @@ func (c *beaconApiValidatorClient) StreamBlocksAltair(ctx context.Context, in *e
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest, index primitives.ValidatorIndex, committeeLength uint64) (*ethpb.AggregateSelectionResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.SubmitAggregateSelectionProof")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*ethpb.AggregateSelectionResponse]("SubmitAggregateSelectionProof", func() (*ethpb.AggregateSelectionResponse, error) {
|
||||
return c.submitAggregateSelectionProof(ctx, in, index, committeeLength)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) SubmitSignedAggregateSelectionProof(ctx context.Context, in *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.SubmitSignedAggregateSelectionProof")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*ethpb.SignedAggregateSubmitResponse]("SubmitSignedAggregateSelectionProof", func() (*ethpb.SignedAggregateSubmitResponse, error) {
|
||||
return c.submitSignedAggregateSelectionProof(ctx, in)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) SubmitSignedContributionAndProof(ctx context.Context, in *ethpb.SignedContributionAndProof) (*empty.Empty, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.SubmitSignedContributionAndProof")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*empty.Empty]("SubmitSignedContributionAndProof", func() (*empty.Empty, error) {
|
||||
return new(empty.Empty), c.submitSignedContributionAndProof(ctx, in)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) SubmitSyncMessage(ctx context.Context, in *ethpb.SyncCommitteeMessage) (*empty.Empty, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.SubmitSyncMessage")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*empty.Empty]("SubmitSyncMessage", func() (*empty.Empty, error) {
|
||||
return new(empty.Empty), c.submitSyncMessage(ctx, in)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) SubmitValidatorRegistrations(ctx context.Context, in *ethpb.SignedValidatorRegistrationsV1) (*empty.Empty, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.SubmitValidatorRegistrations")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*empty.Empty]("SubmitValidatorRegistrations", func() (*empty.Empty, error) {
|
||||
return new(empty.Empty), c.submitValidatorRegistrations(ctx, in.Messages)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, duties []*ethpb.DutiesResponse_Duty) (*empty.Empty, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.SubscribeCommitteeSubnets")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*empty.Empty]("SubscribeCommitteeSubnets", func() (*empty.Empty, error) {
|
||||
return new(empty.Empty), c.subscribeCommitteeSubnets(ctx, in, duties)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) ValidatorIndex(ctx context.Context, in *ethpb.ValidatorIndexRequest) (*ethpb.ValidatorIndexResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.ValidatorIndex")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[*ethpb.ValidatorIndexResponse]("ValidatorIndex", func() (*ethpb.ValidatorIndexResponse, error) {
|
||||
return c.validatorIndex(ctx, in)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) ValidatorStatus(ctx context.Context, in *ethpb.ValidatorStatusRequest) (*ethpb.ValidatorStatusResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.ValidatorStatus")
|
||||
defer span.End()
|
||||
|
||||
return c.validatorStatus(ctx, in)
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) WaitForActivation(ctx context.Context, in *ethpb.ValidatorActivationRequest) (ethpb.BeaconNodeValidator_WaitForActivationClient, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.WaitForActivation")
|
||||
defer span.End()
|
||||
|
||||
return c.waitForActivation(ctx, in)
|
||||
}
|
||||
|
||||
@@ -278,21 +212,11 @@ func (c *beaconApiValidatorClient) EventStreamIsRunning() bool {
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) AggregatedSelections(ctx context.Context, selections []iface.BeaconCommitteeSelection) ([]iface.BeaconCommitteeSelection, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.AggregatedSelections")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[[]iface.BeaconCommitteeSelection]("AggregatedSelections", func() ([]iface.BeaconCommitteeSelection, error) {
|
||||
return c.aggregatedSelection(ctx, selections)
|
||||
})
|
||||
return c.aggregatedSelection(ctx, selections)
|
||||
}
|
||||
|
||||
func (c *beaconApiValidatorClient) AggregatedSyncSelections(ctx context.Context, selections []iface.SyncCommitteeSelection) ([]iface.SyncCommitteeSelection, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-api.AggregatedSyncSelections")
|
||||
defer span.End()
|
||||
|
||||
return wrapInMetrics[[]iface.SyncCommitteeSelection]("AggregatedSyncSelections", func() ([]iface.SyncCommitteeSelection, error) {
|
||||
return c.aggregatedSyncSelections(ctx, selections)
|
||||
})
|
||||
return c.aggregatedSyncSelections(ctx, selections)
|
||||
}
|
||||
|
||||
func wrapInMetrics[Resp any](action string, f func() (Resp, error)) (Resp, error) {
|
||||
|
||||
@@ -32,7 +32,7 @@ func TestBeaconApiValidatorClient_GetAttestationDataValid(t *testing.T) {
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
produceAttestationDataResponseJson := structs.GetAttestationDataResponse{}
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
fmt.Sprintf("/eth/v1/validator/attestation_data?committee_index=%d&slot=%d", committeeIndex, slot),
|
||||
&produceAttestationDataResponseJson,
|
||||
).Return(
|
||||
@@ -66,7 +66,7 @@ func TestBeaconApiValidatorClient_GetAttestationDataError(t *testing.T) {
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
produceAttestationDataResponseJson := structs.GetAttestationDataResponse{}
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
fmt.Sprintf("/eth/v1/validator/attestation_data?committee_index=%d&slot=%d", committeeIndex, slot),
|
||||
&produceAttestationDataResponseJson,
|
||||
).Return(
|
||||
@@ -109,7 +109,7 @@ func TestBeaconApiValidatorClient_DomainDataValid(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
genesisProvider := mock.NewMockGenesisProvider(ctrl)
|
||||
genesisProvider.EXPECT().Genesis(gomock.Any()).Return(
|
||||
genesisProvider.EXPECT().Genesis(ctx).Return(
|
||||
&structs.Genesis{GenesisValidatorsRoot: genesisValidatorRoot},
|
||||
nil,
|
||||
).Times(2)
|
||||
@@ -139,7 +139,7 @@ func TestBeaconApiValidatorClient_ProposeBeaconBlockValid(t *testing.T) {
|
||||
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().Post(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
"/eth/v1/beacon/blocks",
|
||||
map[string]string{"Eth-Consensus-Version": "phase0"},
|
||||
gomock.Any(),
|
||||
@@ -175,7 +175,7 @@ func TestBeaconApiValidatorClient_ProposeBeaconBlockError(t *testing.T) {
|
||||
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().Post(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
"/eth/v1/beacon/blocks",
|
||||
map[string]string{"Eth-Consensus-Version": "phase0"},
|
||||
gomock.Any(),
|
||||
|
||||
@@ -98,7 +98,7 @@ func TestGetAggregatedSelections(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
jsonRestHandler.EXPECT().Post(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
"/eth/v1/validator/beacon_committee_selections",
|
||||
nil,
|
||||
bytes.NewBuffer(reqBody),
|
||||
|
||||
@@ -37,7 +37,7 @@ func TestGetDomainData_ValidDomainData(t *testing.T) {
|
||||
|
||||
// Make sure that Genesis() is called exactly once
|
||||
genesisProvider := mock.NewMockGenesisProvider(ctrl)
|
||||
genesisProvider.EXPECT().Genesis(gomock.Any()).Return(
|
||||
genesisProvider.EXPECT().Genesis(ctx).Return(
|
||||
&structs.Genesis{GenesisValidatorsRoot: genesisValidatorRoot},
|
||||
nil,
|
||||
).Times(1)
|
||||
@@ -66,7 +66,7 @@ func TestGetDomainData_GenesisError(t *testing.T) {
|
||||
|
||||
// Make sure that Genesis() is called exactly once
|
||||
genesisProvider := mock.NewMockGenesisProvider(ctrl)
|
||||
genesisProvider.EXPECT().Genesis(gomock.Any()).Return(nil, errors.New("foo error")).Times(1)
|
||||
genesisProvider.EXPECT().Genesis(ctx).Return(nil, errors.New("foo error")).Times(1)
|
||||
|
||||
validatorClient := &beaconApiValidatorClient{genesisProvider: genesisProvider}
|
||||
_, err := validatorClient.domainData(ctx, epoch, domainType)
|
||||
@@ -85,7 +85,7 @@ func TestGetDomainData_InvalidGenesisRoot(t *testing.T) {
|
||||
|
||||
// Make sure that Genesis() is called exactly once
|
||||
genesisProvider := mock.NewMockGenesisProvider(ctrl)
|
||||
genesisProvider.EXPECT().Genesis(gomock.Any()).Return(
|
||||
genesisProvider.EXPECT().Genesis(ctx).Return(
|
||||
&structs.Genesis{GenesisValidatorsRoot: "foo"},
|
||||
nil,
|
||||
).Times(1)
|
||||
|
||||
@@ -291,11 +291,13 @@ func TestCheckDoppelGanger_Nominal(t *testing.T) {
|
||||
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if testCase.getSyncingOutput != nil {
|
||||
syncingResponseJson := structs.SyncStatusResponse{}
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
syncingEndpoint,
|
||||
&syncingResponseJson,
|
||||
).Return(
|
||||
@@ -310,7 +312,7 @@ func TestCheckDoppelGanger_Nominal(t *testing.T) {
|
||||
stateForkResponseJson := structs.GetStateForkResponse{}
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
forkEndpoint,
|
||||
&stateForkResponseJson,
|
||||
).Return(
|
||||
@@ -325,7 +327,7 @@ func TestCheckDoppelGanger_Nominal(t *testing.T) {
|
||||
blockHeadersResponseJson := structs.GetBlockHeadersResponse{}
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
headersEndpoint,
|
||||
&blockHeadersResponseJson,
|
||||
).Return(
|
||||
@@ -344,7 +346,7 @@ func TestCheckDoppelGanger_Nominal(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
jsonRestHandler.EXPECT().Post(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
iface.inputUrl,
|
||||
nil,
|
||||
bytes.NewBuffer(marshalledIndexes),
|
||||
@@ -362,7 +364,7 @@ func TestCheckDoppelGanger_Nominal(t *testing.T) {
|
||||
|
||||
if testCase.getStateValidatorsInterface != nil {
|
||||
stateValidatorsProvider.EXPECT().StateValidators(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
testCase.getStateValidatorsInterface.input,
|
||||
nil,
|
||||
nil,
|
||||
@@ -725,11 +727,13 @@ func TestCheckDoppelGanger_Errors(t *testing.T) {
|
||||
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if testCase.getSyncingOutput != nil {
|
||||
syncingResponseJson := structs.SyncStatusResponse{}
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
syncingEndpoint,
|
||||
&syncingResponseJson,
|
||||
).Return(
|
||||
@@ -744,7 +748,7 @@ func TestCheckDoppelGanger_Errors(t *testing.T) {
|
||||
stateForkResponseJson := structs.GetStateForkResponse{}
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
forkEndpoint,
|
||||
&stateForkResponseJson,
|
||||
).Return(
|
||||
@@ -759,7 +763,7 @@ func TestCheckDoppelGanger_Errors(t *testing.T) {
|
||||
blockHeadersResponseJson := structs.GetBlockHeadersResponse{}
|
||||
|
||||
jsonRestHandler.EXPECT().Get(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
headersEndpoint,
|
||||
&blockHeadersResponseJson,
|
||||
).Return(
|
||||
@@ -774,7 +778,7 @@ func TestCheckDoppelGanger_Errors(t *testing.T) {
|
||||
|
||||
if testCase.getStateValidatorsInterface != nil {
|
||||
stateValidatorsProvider.EXPECT().StateValidators(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
testCase.getStateValidatorsInterface.input,
|
||||
nil,
|
||||
nil,
|
||||
@@ -792,7 +796,7 @@ func TestCheckDoppelGanger_Errors(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
jsonRestHandler.EXPECT().Post(
|
||||
gomock.Any(),
|
||||
ctx,
|
||||
iface.inputUrl,
|
||||
nil,
|
||||
bytes.NewBuffer(marshalledIndexes),
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user