Compare commits

15 Commits

Author SHA1 Message Date
Taranpreet26311
f265eb7a84 Merge branch 'develop' into Stale_PR_checker 2024-06-25 17:40:29 +04:00
Taranpreet26311
6562036c54 PR to add Stale pull request checker for Prysm 2024-06-25 17:39:46 +04:00
Radosław Kapka
b8aad84285 EIP-7549: attestation pool (#14121)
* implementation

* test fixes

* Electra tests

* remove aggregator tests

* id comments and tests

* make Id equal to [33]byte
2024-06-25 13:18:07 +00:00
Sammy Rosso
5f0d6074d6 Prysmctl ui bug (#14140)
* Fix ui bug

* Better logging
2024-06-25 13:13:42 +00:00
james-prysm
9d6a2f5390 Remove electra duplicate helpers (#14138)
* removing duplicate helper functions to reduce 6110 size

* linting
2024-06-24 21:22:46 +00:00
Preston Van Loon
490ddbf782 Enable golang.org/x/tools/go/analysis/errorsas static analysis check (#14135) 2024-06-24 17:20:45 +00:00
Radosław Kapka
adc875b20d EIP-7549: p2p and sync (#14085)
* EIP-7549: p2p and sync

* small cleanup

* fuzz fix

* deepsource

* review

* fix ineffectual assignment

* fix pubsub

* update ComputeSubnetForAttestation

* review

* review
2024-06-24 13:57:11 +00:00
kasey
8cd249c1c8 update codegen dep and cleanup organization (#14127)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-06-20 23:33:54 +00:00
Preston Van Loon
305d5850e7 ssz: Move stateutil.SliceRoot to ssz package (#14123) 2024-06-20 20:55:15 +00:00
Radosław Kapka
df3a9f218d More tracing in the validator client (#14125)
* More tracing in the validator client

* change context expectation in tests
2024-06-20 16:13:53 +00:00
Preston Van Loon
ae451a3a02 Update github.com/prysmaticlabs/go-bitfield (#14120) 2024-06-18 14:33:06 +00:00
Radosław Kapka
17561a6576 Do not fail production when consensus block value is unavailable (#14111)
* Do not fail production when consensus block value is unavailable

* add log

* use empty string instead of 0

* build fix
2024-06-14 18:30:40 +00:00
james-prysm
b842b7ea01 proposer settings log ux (#14106)
* adding some logs to improve debugging

* fixing log functions

* Update config/proposer/loader/loader.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing feedback

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-06-14 14:16:43 +00:00
sam (jgscripts)
9bbe12e28c Correcting spelling errors (#14107)
* fix small spelling error

* fix small grammar error

* fix small spelling errors

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-06-14 13:41:33 +00:00
Delweng
0674cf64cc chore: make deepsource happy (#14081)
* chore(pruner): return error directly

Signed-off-by: jsvisa <delweng@gmail.com>

* chore(rpc): unused method receiver

Signed-off-by: jsvisa <delweng@gmail.com>

* fix(rpc): use net.JoinHostPort instead of fmt.Sprintf

Signed-off-by: jsvisa <delweng@gmail.com>

* chore(amiddleware):use http.NoBody instead of nil

Signed-off-by: jsvisa <delweng@gmail.com>

* chore(rpc): rm notused params

Signed-off-by: jsvisa <delweng@gmail.com>

* chore(p2p): comment

Signed-off-by: jsvisa <delweng@gmail.com>

* feat(db/prune): reduce complexity

Signed-off-by: jsvisa <delweng@gmail.com>

* chore(db/pruner): name

Signed-off-by: jsvisa <delweng@gmail.com>

* Revert "chore(pruner): return error directly"

This reverts commit d76e745f60.

Signed-off-by: jsvisa <delweng@gmail.com>

* revert back pruner.go

Signed-off-by: jsvisa <delweng@gmail.com>

---------

Signed-off-by: jsvisa <delweng@gmail.com>
2024-06-13 16:12:04 +00:00
147 changed files with 21981 additions and 22505 deletions

.github/workflows/stale_pr_checker.yml
View File

@@ -0,0 +1,27 @@
name: Find stale PRs
on:
  schedule:
    - cron: '0 13 * * 1'
jobs:
  fetch-PRs:
    runs-on: ubuntu-latest
    steps:
      - name: Fetch pull requests from here
        id: local
        uses: paritytech/stale-pr-finder@main
        with:
          GITHUB_TOKEN: ${{ github.token }}
          repo: prysm
      - name: Post to a Slack channel
        id: slack
        uses: slackapi/slack-github-action@v1.23.0
        with:
          channel-id: ${{ secrets.CHANNEL }}
          slack-message: |
            Stale PRs this week:
            ${{ steps.local.outputs.message }}
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
          LOCAL_PR: ${{ steps.local.outputs.message }}

View File

@@ -224,6 +224,7 @@ nogo(
"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library",
"@org_golang_x_tools//go/analysis/passes/defers:go_default_library",
"@org_golang_x_tools//go/analysis/passes/directive:go_default_library",
"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library",
# fieldalignment disabled
#"@org_golang_x_tools//go/analysis/passes/fieldalignment:go_default_library",
"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library",

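For context on the line added above: errorsas is the standard golang.org/x/tools analyzer that reports misuse of errors.As, most commonly a second argument that is not a pointer to a type implementing error. A minimal sketch of the call shape it enforces (the file name and error handling are made up for illustration, not taken from this diff):

```go
// Illustrative only: what the errorsas analyzer enforces.
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("missing.txt")

	// Correct shape: the second argument is a non-nil pointer to a type
	// that implements error, so errors.As can fill it in on a match.
	var pathErr *os.PathError
	if errors.As(err, &pathErr) {
		fmt.Println("failed path:", pathErr.Path)
	}

	// The analyzer reports calls whose second argument is not such a pointer,
	// e.g. errors.As(err, err), which would also panic at runtime.
}
```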
View File

@@ -31,7 +31,7 @@ func TestNormalizeQueryValuesHandler(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, http.NoBody)
if err != nil {
t.Fatal(err)
}
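The hunk above swaps a nil request body for http.NoBody, one of the deepsource-driven cleanups listed in the commit messages. A small self-contained sketch of the same pattern (the test path and query are made up):

```go
// Illustrative only: preferring http.NoBody over a nil request body.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// http.NoBody is a zero-byte io.ReadCloser whose Read returns io.EOF
	// immediately. Passing it instead of nil states explicitly that the
	// request intentionally has no body.
	req := httptest.NewRequest(http.MethodGet, "/test?foo=bar", http.NoBody)
	fmt.Println(req.Method, req.URL.String())
}
```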

View File

@@ -40,7 +40,7 @@ import (
//
// state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:]
func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "electra.ProcessPendingConsolidations")
_, span := trace.StartSpan(ctx, "electra.ProcessPendingConsolidations")
defer span.End()
if st == nil || st.IsNil() {
@@ -68,7 +68,7 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
break
}
if err := SwitchToCompoundingValidator(ctx, st, pc.TargetIndex); err != nil {
if err := SwitchToCompoundingValidator(st, pc.TargetIndex); err != nil {
return err
}

View File

@@ -295,14 +295,14 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
}
for _, index := range preActivationIndices {
if err := helpers.QueueEntireBalanceAndResetValidator(post, index); err != nil {
if err := QueueEntireBalanceAndResetValidator(post, index); err != nil {
return nil, errors.Wrap(err, "failed to queue entire balance and reset validator")
}
}
// Ensure early adopters of compounding credentials go through the activation churn
for _, index := range compoundWithdrawalIndices {
if err := helpers.QueueExcessActiveBalance(post, index); err != nil {
if err := QueueExcessActiveBalance(post, index); err != nil {
return nil, errors.Wrap(err, "failed to queue excess active balance")
}
}

View File

@@ -1,7 +1,6 @@
package electra
import (
"context"
"errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
@@ -19,7 +18,7 @@ import (
// if has_eth1_withdrawal_credential(validator):
// validator.withdrawal_credentials = COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
// queue_excess_active_balance(state, index)
func SwitchToCompoundingValidator(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
func SwitchToCompoundingValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
v, err := s.ValidatorAtIndex(idx)
if err != nil {
return err
@@ -32,12 +31,12 @@ func SwitchToCompoundingValidator(ctx context.Context, s state.BeaconState, idx
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
return err
}
return queueExcessActiveBalance(ctx, s, idx)
return QueueExcessActiveBalance(s, idx)
}
return nil
}
// queueExcessActiveBalance
// QueueExcessActiveBalance
//
// Spec definition:
//
@@ -49,7 +48,7 @@ func SwitchToCompoundingValidator(ctx context.Context, s state.BeaconState, idx
// state.pending_balance_deposits.append(
// PendingBalanceDeposit(index=index, amount=excess_balance)
// )
func queueExcessActiveBalance(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := s.BalanceAtIndex(idx)
if err != nil {
return err
@@ -80,7 +79,7 @@ func queueExcessActiveBalance(ctx context.Context, s state.BeaconState, idx prim
// )
//
//nolint:dupword
func QueueEntireBalanceAndResetValidator(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
func QueueEntireBalanceAndResetValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := s.BalanceAtIndex(idx)
if err != nil {
return err

View File

@@ -2,7 +2,6 @@ package electra_test
import (
"bytes"
"context"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
@@ -11,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestSwitchToCompoundingValidator(t *testing.T) {
@@ -34,10 +34,10 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
})
// Test that a validator with no withdrawal credentials cannot be switched to compounding.
require.NoError(t, err)
require.ErrorContains(t, "validator has no withdrawal credentials", electra.SwitchToCompoundingValidator(context.TODO(), s, 0))
require.ErrorContains(t, "validator has no withdrawal credentials", electra.SwitchToCompoundingValidator(s, 0))
// Test that a validator with withdrawal credentials can be switched to compounding.
require.NoError(t, electra.SwitchToCompoundingValidator(context.TODO(), s, 1))
require.NoError(t, electra.SwitchToCompoundingValidator(s, 1))
v, err := s.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, true, bytes.HasPrefix(v.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}), "withdrawal credentials were not updated")
@@ -50,7 +50,7 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
require.Equal(t, 0, len(pbd), "pending balance deposits should be empty")
// Test that a validator with excess balance can be switched to compounding, excess balance is queued.
require.NoError(t, electra.SwitchToCompoundingValidator(context.TODO(), s, 2))
require.NoError(t, electra.SwitchToCompoundingValidator(s, 2))
b, err = s.BalanceAtIndex(2)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was not changed")
@@ -74,7 +74,7 @@ func TestQueueEntireBalanceAndResetValidator(t *testing.T) {
},
})
require.NoError(t, err)
require.NoError(t, electra.QueueEntireBalanceAndResetValidator(context.TODO(), s, 0))
require.NoError(t, electra.QueueEntireBalanceAndResetValidator(s, 0))
b, err := s.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), b, "balance was not changed")
@@ -88,3 +88,57 @@ func TestQueueEntireBalanceAndResetValidator(t *testing.T) {
require.Equal(t, params.BeaconConfig().MinActivationBalance+100_000, pbd[0].Amount, "pending balance deposit amount is incorrect")
require.Equal(t, primitives.ValidatorIndex(0), pbd[0].Index, "pending balance deposit index is incorrect")
}
func TestSwitchToCompoundingValidator_Ok(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
vals := st.Validators()
vals[0].WithdrawalCredentials = []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte}
require.NoError(t, st.SetValidators(vals))
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance + 1010
require.NoError(t, st.SetBalances(bals))
require.NoError(t, electra.SwitchToCompoundingValidator(st, 0))
pbd, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, uint64(1010), pbd[0].Amount) // appends it at the end
val, err := st.ValidatorAtIndex(0)
require.NoError(t, err)
bytes.HasPrefix(val.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte})
}
func TestQueueExcessActiveBalance_Ok(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance + 1000
require.NoError(t, st.SetBalances(bals))
err := electra.QueueExcessActiveBalance(st, 0)
require.NoError(t, err)
pbd, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, uint64(1000), pbd[0].Amount) // appends it at the end
bals = st.Balances()
require.Equal(t, params.BeaconConfig().MinActivationBalance, bals[0])
}
func TestQueueEntireBalanceAndResetValidator_Ok(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
// need to manually set this to 0 as after 6110 these balances are now 0 and instead populates pending balance deposits
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance - 1000
require.NoError(t, st.SetBalances(bals))
err := electra.QueueEntireBalanceAndResetValidator(st, 0)
require.NoError(t, err)
pbd, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd))
require.Equal(t, params.BeaconConfig().MinActivationBalance-1000, pbd[0].Amount)
bal, err := st.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), bal)
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -91,6 +92,14 @@ func IsAggregated(attestation ethpb.Att) bool {
//
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
func ComputeSubnetForAttestation(activeValCount uint64, att ethpb.Att) uint64 {
if att.Version() >= version.Electra {
committeeIndex := 0
committeeIndices := att.CommitteeBitsVal().BitIndices()
if len(committeeIndices) > 0 {
committeeIndex = committeeIndices[0]
}
return ComputeSubnetFromCommitteeAndSlot(activeValCount, primitives.CommitteeIndex(committeeIndex), att.GetData().Slot)
}
return ComputeSubnetFromCommitteeAndSlot(activeValCount, att.GetData().CommitteeIndex, att.GetData().Slot)
}

View File

@@ -73,21 +73,37 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
att := &ethpb.Attestation{
AggregationBits: []byte{'A'},
Data: &ethpb.AttestationData{
Slot: 34,
CommitteeIndex: 4,
BeaconBlockRoot: []byte{'C'},
Source: nil,
Target: nil,
},
Signature: []byte{'B'},
}
valCount, err := helpers.ActiveValidatorCount(context.Background(), state, slots.ToEpoch(att.Data.Slot))
valCount, err := helpers.ActiveValidatorCount(context.Background(), state, slots.ToEpoch(34))
require.NoError(t, err)
sub := helpers.ComputeSubnetForAttestation(valCount, att)
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
t.Run("Phase 0", func(t *testing.T) {
att := &ethpb.Attestation{
AggregationBits: []byte{'A'},
Data: &ethpb.AttestationData{
Slot: 34,
CommitteeIndex: 4,
BeaconBlockRoot: []byte{'C'},
},
Signature: []byte{'B'},
}
sub := helpers.ComputeSubnetForAttestation(valCount, att)
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
})
t.Run("Electra", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(4, true)
att := &ethpb.AttestationElectra{
AggregationBits: []byte{'A'},
CommitteeBits: cb,
Data: &ethpb.AttestationData{
Slot: 34,
BeaconBlockRoot: []byte{'C'},
},
Signature: []byte{'B'},
}
sub := helpers.ComputeSubnetForAttestation(valCount, att)
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
})
}
func Test_ValidateAttestationTime(t *testing.T) {

View File

@@ -674,68 +674,3 @@ func ValidatorMaxEffectiveBalance(val *ethpb.Validator) uint64 {
}
return params.BeaconConfig().MinActivationBalance
}
// QueueExcessActiveBalance queues validators with balances above the min activation balance and adds to pending balance deposit.
//
// Spec definition:
//
// def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None:
// balance = state.balances[index]
// if balance > MIN_ACTIVATION_BALANCE:
// excess_balance = balance - MIN_ACTIVATION_BALANCE
// state.balances[index] = MIN_ACTIVATION_BALANCE
// state.pending_balance_deposits.append(
// PendingBalanceDeposit(index=index, amount=excess_balance)
// )
func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := s.BalanceAtIndex(idx)
if err != nil {
return err
}
if bal > params.BeaconConfig().MinActivationBalance {
excessBalance := bal - params.BeaconConfig().MinActivationBalance
if err := s.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
return err
}
return s.AppendPendingBalanceDeposit(idx, excessBalance)
}
return nil
}
// QueueEntireBalanceAndResetValidator queues the entire balance and resets the validator. This is used in electra fork logic.
//
// Spec definition:
//
// def queue_entire_balance_and_reset_validator(state: BeaconState, index: ValidatorIndex) -> None:
// balance = state.balances[index]
// validator = state.validators[index]
// state.balances[index] = 0
// validator.effective_balance = 0
// validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH
// state.pending_balance_deposits.append(
// PendingBalanceDeposit(index=index, amount=balance)
// )
func QueueEntireBalanceAndResetValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := s.BalanceAtIndex(idx)
if err != nil {
return err
}
if err := s.UpdateBalancesAtIndex(idx, 0); err != nil {
return err
}
v, err := s.ValidatorAtIndex(idx)
if err != nil {
return err
}
v.EffectiveBalance = 0
v.ActivationEligibilityEpoch = params.BeaconConfig().FarFutureEpoch
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
return err
}
return s.AppendPendingBalanceDeposit(idx, bal)
}

View File

@@ -18,7 +18,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestIsActiveValidator_OK(t *testing.T) {
@@ -1120,40 +1119,3 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
// Sanity check that MinActivationBalance equals (pre-electra) MaxEffectiveBalance
assert.Equal(t, params.BeaconConfig().MinActivationBalance, params.BeaconConfig().MaxEffectiveBalance)
}
func TestQueueExcessActiveBalance_Ok(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance + 1000
require.NoError(t, st.SetBalances(bals))
err := helpers.QueueExcessActiveBalance(st, 0)
require.NoError(t, err)
pbd, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, uint64(1000), pbd[0].Amount)
bals = st.Balances()
require.Equal(t, params.BeaconConfig().MinActivationBalance, bals[0])
}
func TestQueueEntireBalanceAndResetValidator_Ok(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
val, err := st.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, val.EffectiveBalance)
pbd, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(pbd))
err = helpers.QueueEntireBalanceAndResetValidator(st, 0)
require.NoError(t, err)
pbd, err = st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd))
val, err = st.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), val.EffectiveBalance)
}

View File

@@ -69,7 +69,6 @@ go_library(
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
fastssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/cmd"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -197,7 +196,3 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
" Default fee recipient will be used as a fall back", checksumAddress.Hex())
return params.SetActive(c)
}
func configureFastSSZHashingAlgorithm() {
fastssz.EnableVectorizedHTR = true
}

View File

@@ -277,8 +277,6 @@ func configureBeacon(cliCtx *cli.Context) error {
return errors.Wrap(err, "could not configure execution setting")
}
configureFastSSZHashingAlgorithm()
return nil
}

View File

@@ -21,12 +21,13 @@ go_library(
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",

View File

@@ -16,9 +16,10 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",
"//runtime/version:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
@@ -39,14 +40,15 @@ go_test(
embed = [":go_default_library"],
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

View File

@@ -9,7 +9,9 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
attaggregation "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation/aggregation/attestations"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -32,28 +34,28 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
_, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAtts")
defer span.End()
attsByDataRoot := make(map[[32]byte][]ethpb.Att, len(unaggregatedAtts))
attsByVerAndDataRoot := make(map[attestation.Id][]ethpb.Att, len(unaggregatedAtts))
for _, att := range unaggregatedAtts {
attDataRoot, err := att.GetData().HashTreeRoot()
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return err
return errors.Wrap(err, "could not create attestation ID")
}
attsByDataRoot[attDataRoot] = append(attsByDataRoot[attDataRoot], att)
attsByVerAndDataRoot[id] = append(attsByVerAndDataRoot[id], att)
}
// Aggregate unaggregated attestations from the pool and save them in the pool.
// Track the unaggregated attestations that aren't able to aggregate.
leftOverUnaggregatedAtt := make(map[[32]byte]bool)
leftOverUnaggregatedAtt := make(map[attestation.Id]bool)
leftOverUnaggregatedAtt = c.aggregateParallel(attsByDataRoot, leftOverUnaggregatedAtt)
leftOverUnaggregatedAtt = c.aggregateParallel(attsByVerAndDataRoot, leftOverUnaggregatedAtt)
// Remove the unaggregated attestations from the pool that were successfully aggregated.
for _, att := range unaggregatedAtts {
h, err := hashFn(att)
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return err
return errors.Wrap(err, "could not create attestation ID")
}
if leftOverUnaggregatedAtt[h] {
if leftOverUnaggregatedAtt[id] {
continue
}
if err := c.DeleteUnaggregatedAttestation(att); err != nil {
@@ -66,7 +68,7 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
// aggregateParallel aggregates attestations in parallel for `atts` and saves them in the pool,
// returns the unaggregated attestations that weren't able to aggregate.
// Given `n` CPU cores, it creates a channel of size `n` and spawns `n` goroutines to aggregate attestations
func (c *AttCaches) aggregateParallel(atts map[[32]byte][]ethpb.Att, leftOver map[[32]byte]bool) map[[32]byte]bool {
func (c *AttCaches) aggregateParallel(atts map[attestation.Id][]ethpb.Att, leftOver map[attestation.Id]bool) map[attestation.Id]bool {
var leftoverLock sync.Mutex
wg := sync.WaitGroup{}
@@ -92,13 +94,13 @@ func (c *AttCaches) aggregateParallel(atts map[[32]byte][]ethpb.Att, leftOver ma
continue
}
} else {
h, err := hashFn(aggregated)
id, err := attestation.NewId(aggregated, attestation.Full)
if err != nil {
log.WithError(err).Error("could not hash attestation")
log.WithError(err).Error("Could not create attestation ID")
continue
}
leftoverLock.Lock()
leftOver[h] = true
leftOver[id] = true
leftoverLock.Unlock()
}
}
@@ -139,17 +141,18 @@ func (c *AttCaches) SaveAggregatedAttestation(att ethpb.Att) error {
return nil
}
r, err := hashFn(att.GetData())
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}
copiedAtt := att.Copy()
c.aggregatedAttLock.Lock()
defer c.aggregatedAttLock.Unlock()
atts, ok := c.aggregatedAtt[r]
atts, ok := c.aggregatedAtt[id]
if !ok {
atts := []ethpb.Att{copiedAtt}
c.aggregatedAtt[r] = atts
c.aggregatedAtt[id] = atts
return nil
}
@@ -157,7 +160,7 @@ func (c *AttCaches) SaveAggregatedAttestation(att ethpb.Att) error {
if err != nil {
return err
}
c.aggregatedAtt[r] = atts
c.aggregatedAtt[id] = atts
return nil
}
@@ -191,17 +194,56 @@ func (c *AttCaches) AggregatedAttestations() []ethpb.Att {
// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []ethpb.Att {
func (c *AttCaches) AggregatedAttestationsBySlotIndex(
ctx context.Context,
slot primitives.Slot,
committeeIndex primitives.CommitteeIndex,
) []*ethpb.Attestation {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
defer span.End()
atts := make([]ethpb.Att, 0)
atts := make([]*ethpb.Attestation, 0)
c.aggregatedAttLock.RLock()
defer c.aggregatedAttLock.RUnlock()
for _, a := range c.aggregatedAtt {
if slot == a[0].GetData().Slot && committeeIndex == a[0].GetData().CommitteeIndex {
atts = append(atts, a...)
for _, as := range c.aggregatedAtt {
if as[0].Version() == version.Phase0 && slot == as[0].GetData().Slot && committeeIndex == as[0].GetData().CommitteeIndex {
for _, a := range as {
att, ok := a.(*ethpb.Attestation)
// This will never fail in practice because we asserted the version
if ok {
atts = append(atts, att)
}
}
}
}
return atts
}
// AggregatedAttestationsBySlotIndexElectra returns the aggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) AggregatedAttestationsBySlotIndexElectra(
ctx context.Context,
slot primitives.Slot,
committeeIndex primitives.CommitteeIndex,
) []*ethpb.AttestationElectra {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndexElectra")
defer span.End()
atts := make([]*ethpb.AttestationElectra, 0)
c.aggregatedAttLock.RLock()
defer c.aggregatedAttLock.RUnlock()
for _, as := range c.aggregatedAtt {
if as[0].Version() == version.Electra && slot == as[0].GetData().Slot && as[0].CommitteeBitsVal().BitAt(uint64(committeeIndex)) {
for _, a := range as {
att, ok := a.(*ethpb.AttestationElectra)
// This will never fail in practice because we asserted the version
if ok {
atts = append(atts, att)
}
}
}
}
@@ -216,18 +258,19 @@ func (c *AttCaches) DeleteAggregatedAttestation(att ethpb.Att) error {
if !helpers.IsAggregated(att) {
return errors.New("attestation is not aggregated")
}
r, err := hashFn(att.GetData())
if err != nil {
return errors.Wrap(err, "could not tree hash attestation data")
}
if err := c.insertSeenBit(att); err != nil {
return err
}
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrap(err, "could not create attestation ID")
}
c.aggregatedAttLock.Lock()
defer c.aggregatedAttLock.Unlock()
attList, ok := c.aggregatedAtt[r]
attList, ok := c.aggregatedAtt[id]
if !ok {
return nil
}
@@ -241,9 +284,9 @@ func (c *AttCaches) DeleteAggregatedAttestation(att ethpb.Att) error {
}
}
if len(filtered) == 0 {
delete(c.aggregatedAtt, r)
delete(c.aggregatedAtt, id)
} else {
c.aggregatedAtt[r] = filtered
c.aggregatedAtt[id] = filtered
}
return nil
@@ -254,14 +297,15 @@ func (c *AttCaches) HasAggregatedAttestation(att ethpb.Att) (bool, error) {
if err := helpers.ValidateNilAttestation(att); err != nil {
return false, err
}
r, err := hashFn(att.GetData())
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return false, errors.Wrap(err, "could not tree hash attestation")
return false, errors.Wrap(err, "could not create attestation ID")
}
c.aggregatedAttLock.RLock()
defer c.aggregatedAttLock.RUnlock()
if atts, ok := c.aggregatedAtt[r]; ok {
if atts, ok := c.aggregatedAtt[id]; ok {
for _, a := range atts {
if c, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
return false, err
@@ -273,7 +317,7 @@ func (c *AttCaches) HasAggregatedAttestation(att ethpb.Att) (bool, error) {
c.blockAttLock.RLock()
defer c.blockAttLock.RUnlock()
if atts, ok := c.blockAtt[r]; ok {
if atts, ok := c.blockAtt[id]; ok {
for _, a := range atts {
if c, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
return false, err
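The aggregateParallel comment above describes the fan-out: one goroutine per CPU core pulls grouped attestations from a channel and records, under a mutex, which groups were left unaggregated. A rough, self-contained sketch of that concurrency pattern only — generic string keys and an integer "aggregate" stand in for attestation IDs and real attestation aggregation; this is not Prysm's code:

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// aggregateGroups fans groups out to one worker per CPU core and returns the
// keys of groups that could not be aggregated. Keys and the aggregate
// function are placeholders for attestation.Id and attestation aggregation.
func aggregateGroups(groups map[string][]int, aggregate func([]int) (int, error)) map[string]bool {
	leftOver := make(map[string]bool)
	var mu sync.Mutex

	n := runtime.GOMAXPROCS(0)
	ch := make(chan string, n)
	var wg sync.WaitGroup

	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for key := range ch {
				if _, err := aggregate(groups[key]); err != nil {
					mu.Lock()
					leftOver[key] = true
					mu.Unlock()
				}
			}
		}()
	}

	for key := range groups {
		ch <- key
	}
	close(ch)
	wg.Wait()

	return leftOver
}

func main() {
	groups := map[string][]int{"a": {1, 2}, "b": {3}}
	sum := func(xs []int) (int, error) {
		if len(xs) < 2 {
			return 0, fmt.Errorf("nothing to aggregate")
		}
		total := 0
		for _, x := range xs {
			total += x
		}
		return total, nil
	}
	fmt.Println(aggregateGroups(groups, sum)) // map[b:true]
}
```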

View File

@@ -7,10 +7,11 @@ import (
c "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
fssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
@@ -69,7 +70,7 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
}),
AggregationBits: bitfield.Bitlist{0b10111},
},
wantErrString: "could not tree hash attestation: --.BeaconBlockRoot (" + fssz.ErrBytesLength.Error() + ")",
wantErrString: "could not create attestation ID",
},
{
name: "already seen",
@@ -92,15 +93,13 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
count: 1,
},
}
r, err := hashFn(util.HydrateAttestationData(&ethpb.AttestationData{
Slot: 100,
}))
id, err := attestation.NewId(util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100}}), attestation.Data)
require.NoError(t, err)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cache := NewAttCaches()
cache.seenAtt.Set(string(r[:]), []bitfield.Bitlist{{0xff}}, c.DefaultExpiration)
cache.seenAtt.Set(id.String(), []bitfield.Bitlist{{0xff}}, c.DefaultExpiration)
assert.Equal(t, 0, len(cache.unAggregatedAtt), "Invalid start pool, atts: %d", len(cache.unAggregatedAtt))
err := cache.SaveAggregatedAttestation(tt.att)
@@ -230,7 +229,7 @@ func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
},
}
err := cache.DeleteAggregatedAttestation(att)
wantErr := "could not tree hash attestation data: --.BeaconBlockRoot (" + fssz.ErrBytesLength.Error() + ")"
wantErr := "could not create attestation ID"
assert.ErrorContains(t, wantErr, err)
})
@@ -500,3 +499,49 @@ func TestKV_Aggregated_DuplicateAggregatedAttestations(t *testing.T) {
assert.DeepSSZEqual(t, att2, returned[0], "Did not receive correct aggregated atts")
assert.Equal(t, 1, len(returned), "Did not receive correct aggregated atts")
}
func TestKV_Aggregated_AggregatedAttestationsBySlotIndex(t *testing.T) {
cache := NewAttCaches()
att1 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1011}})
att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
att3 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
atts := []*ethpb.Attestation{att1, att2, att3}
for _, att := range atts {
require.NoError(t, cache.SaveAggregatedAttestation(att))
}
ctx := context.Background()
returned := cache.AggregatedAttestationsBySlotIndex(ctx, 1, 1)
assert.DeepEqual(t, []*ethpb.Attestation{att1}, returned)
returned = cache.AggregatedAttestationsBySlotIndex(ctx, 1, 2)
assert.DeepEqual(t, []*ethpb.Attestation{att2}, returned)
returned = cache.AggregatedAttestationsBySlotIndex(ctx, 2, 1)
assert.DeepEqual(t, []*ethpb.Attestation{att3}, returned)
}
func TestKV_Aggregated_AggregatedAttestationsBySlotIndexElectra(t *testing.T) {
cache := NewAttCaches()
committeeBits := primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(1, true)
att1 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1011}, CommitteeBits: committeeBits})
committeeBits = primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(2, true)
att2 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}, CommitteeBits: committeeBits})
committeeBits = primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(1, true)
att3 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}, CommitteeBits: committeeBits})
atts := []*ethpb.AttestationElectra{att1, att2, att3}
for _, att := range atts {
require.NoError(t, cache.SaveAggregatedAttestation(att))
}
ctx := context.Background()
returned := cache.AggregatedAttestationsBySlotIndexElectra(ctx, 1, 1)
assert.DeepEqual(t, []*ethpb.AttestationElectra{att1}, returned)
returned = cache.AggregatedAttestationsBySlotIndexElectra(ctx, 1, 2)
assert.DeepEqual(t, []*ethpb.AttestationElectra{att2}, returned)
returned = cache.AggregatedAttestationsBySlotIndexElectra(ctx, 2, 1)
assert.DeepEqual(t, []*ethpb.AttestationElectra{att3}, returned)
}

View File

@@ -3,6 +3,7 @@ package kv
import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)
// SaveBlockAttestation saves an block attestation in cache.
@@ -10,14 +11,15 @@ func (c *AttCaches) SaveBlockAttestation(att ethpb.Att) error {
if att == nil {
return nil
}
r, err := hashFn(att.GetData())
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}
c.blockAttLock.Lock()
defer c.blockAttLock.Unlock()
atts, ok := c.blockAtt[r]
atts, ok := c.blockAtt[id]
if !ok {
atts = make([]ethpb.Att, 0, 1)
}
@@ -31,7 +33,7 @@ func (c *AttCaches) SaveBlockAttestation(att ethpb.Att) error {
}
}
c.blockAtt[r] = append(atts, att.Copy())
c.blockAtt[id] = append(atts, att.Copy())
return nil
}
@@ -54,14 +56,15 @@ func (c *AttCaches) DeleteBlockAttestation(att ethpb.Att) error {
if att == nil {
return nil
}
r, err := hashFn(att.GetData())
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}
c.blockAttLock.Lock()
defer c.blockAttLock.Unlock()
delete(c.blockAtt, r)
delete(c.blockAtt, id)
return nil
}

View File

@@ -3,6 +3,7 @@ package kv
import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)
// SaveForkchoiceAttestation saves an forkchoice attestation in cache.
@@ -10,14 +11,15 @@ func (c *AttCaches) SaveForkchoiceAttestation(att ethpb.Att) error {
if att == nil {
return nil
}
r, err := hashFn(att)
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}
c.forkchoiceAttLock.Lock()
defer c.forkchoiceAttLock.Unlock()
c.forkchoiceAtt[r] = att.Copy()
c.forkchoiceAtt[id] = att
return nil
}
@@ -51,14 +53,15 @@ func (c *AttCaches) DeleteForkchoiceAttestation(att ethpb.Att) error {
if att == nil {
return nil
}
r, err := hashFn(att)
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}
c.forkchoiceAttLock.Lock()
defer c.forkchoiceAttLock.Unlock()
delete(c.forkchoiceAtt, r)
delete(c.forkchoiceAtt, id)
return nil
}

View File

@@ -9,24 +9,22 @@ import (
"github.com/patrickmn/go-cache"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)
var hashFn = hash.Proto
// AttCaches defines the caches used to satisfy attestation pool interface.
// These caches are KV store for various attestations
// such are unaggregated, aggregated or attestations within a block.
type AttCaches struct {
aggregatedAttLock sync.RWMutex
aggregatedAtt map[[32]byte][]ethpb.Att
aggregatedAtt map[attestation.Id][]ethpb.Att
unAggregateAttLock sync.RWMutex
unAggregatedAtt map[[32]byte]ethpb.Att
unAggregatedAtt map[attestation.Id]ethpb.Att
forkchoiceAttLock sync.RWMutex
forkchoiceAtt map[[32]byte]ethpb.Att
forkchoiceAtt map[attestation.Id]ethpb.Att
blockAttLock sync.RWMutex
blockAtt map[[32]byte][]ethpb.Att
blockAtt map[attestation.Id][]ethpb.Att
seenAtt *cache.Cache
}
@@ -36,10 +34,10 @@ func NewAttCaches() *AttCaches {
secsInEpoch := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
c := cache.New(secsInEpoch*time.Second, 2*secsInEpoch*time.Second)
pool := &AttCaches{
unAggregatedAtt: make(map[[32]byte]ethpb.Att),
aggregatedAtt: make(map[[32]byte][]ethpb.Att),
forkchoiceAtt: make(map[[32]byte]ethpb.Att),
blockAtt: make(map[[32]byte][]ethpb.Att),
unAggregatedAtt: make(map[attestation.Id]ethpb.Att),
aggregatedAtt: make(map[attestation.Id][]ethpb.Att),
forkchoiceAtt: make(map[attestation.Id]ethpb.Att),
blockAtt: make(map[attestation.Id][]ethpb.Att),
seenAtt: c,
}
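The caches above are rekeyed from raw [32]byte data roots to attestation.Id values. The commit notes for the attestation pool PR say the Id was made "equal to [33]byte"; a plausible reading is one version byte plus a 32-byte root, which keeps otherwise-identical Phase 0 and Electra attestations from colliding in the same map. A speculative sketch of that idea only — the real type lives in proto/prysm/v1alpha1/attestation and may differ:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// Id is a sketch of a 33-byte attestation identifier: one version byte
// followed by a 32-byte root. The layout is an assumption taken from the
// commit note "make Id equal to [33]byte", not Prysm's actual definition.
type Id [33]byte

// newId derives an Id from an attestation version and its serialized data.
// Prysm uses SSZ hash tree roots; sha256 here is a stand-in for illustration.
func newId(version byte, serializedData []byte) Id {
	var id Id
	id[0] = version
	root := sha256.Sum256(serializedData)
	copy(id[1:], root[:])
	return id
}

// String renders the Id for string-keyed caches, mirroring how the pool
// feeds id.String() to its seen-attestation cache.
func (id Id) String() string { return hex.EncodeToString(id[:]) }

func main() {
	// Ids are comparable, so they work directly as map keys, replacing the
	// old [32]byte data-root keys in the pool's caches.
	pool := map[Id][]string{}
	phase0 := newId(0, []byte("attestation data"))
	electra := newId(1, []byte("attestation data"))
	pool[phase0] = append(pool[phase0], "phase0 att")
	pool[electra] = append(pool[electra], "electra att")
	fmt.Println(len(pool)) // 2: same data, different versions, distinct keys
}
```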

View File

@@ -1,21 +1,20 @@
package kv
import (
"fmt"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)
func (c *AttCaches) insertSeenBit(att ethpb.Att) error {
r, err := hashFn(att.GetData())
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return err
return errors.Wrap(err, "could not create attestation ID")
}
v, ok := c.seenAtt.Get(string(r[:]))
v, ok := c.seenAtt.Get(id.String())
if ok {
seenBits, ok := v.([]bitfield.Bitlist)
if !ok {
@@ -24,7 +23,7 @@ func (c *AttCaches) insertSeenBit(att ethpb.Att) error {
alreadyExists := false
for _, bit := range seenBits {
if c, err := bit.Contains(att.GetAggregationBits()); err != nil {
return fmt.Errorf("failed to check seen bits on attestation when inserting bit: %w", err)
return err
} else if c {
alreadyExists = true
break
@@ -33,21 +32,21 @@ func (c *AttCaches) insertSeenBit(att ethpb.Att) error {
if !alreadyExists {
seenBits = append(seenBits, att.GetAggregationBits())
}
c.seenAtt.Set(string(r[:]), seenBits, cache.DefaultExpiration /* one epoch */)
c.seenAtt.Set(id.String(), seenBits, cache.DefaultExpiration /* one epoch */)
return nil
}
c.seenAtt.Set(string(r[:]), []bitfield.Bitlist{att.GetAggregationBits()}, cache.DefaultExpiration /* one epoch */)
c.seenAtt.Set(id.String(), []bitfield.Bitlist{att.GetAggregationBits()}, cache.DefaultExpiration /* one epoch */)
return nil
}
func (c *AttCaches) hasSeenBit(att ethpb.Att) (bool, error) {
r, err := hashFn(att.GetData())
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return false, err
return false, errors.Wrap(err, "could not create attestation ID")
}
v, ok := c.seenAtt.Get(string(r[:]))
v, ok := c.seenAtt.Get(id.String())
if ok {
seenBits, ok := v.([]bitfield.Bitlist)
if !ok {
@@ -55,7 +54,7 @@ func (c *AttCaches) hasSeenBit(att ethpb.Att) (bool, error) {
}
for _, bit := range seenBits {
if c, err := bit.Contains(att.GetAggregationBits()); err != nil {
return false, fmt.Errorf("failed to check seen bits on attestation when reading bit: %w", err)
return false, err
} else if c {
return true, nil
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/prysmaticlabs/go-bitfield"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
@@ -39,18 +40,18 @@ func TestAttCaches_hasSeenBit(t *testing.T) {
func TestAttCaches_insertSeenBitDuplicates(t *testing.T) {
c := NewAttCaches()
att1 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000011}})
r, err := hashFn(att1.Data)
id, err := attestation.NewId(att1, attestation.Data)
require.NoError(t, err)
require.NoError(t, c.insertSeenBit(att1))
require.Equal(t, 1, c.seenAtt.ItemCount())
_, expirationTime1, ok := c.seenAtt.GetWithExpiration(string(r[:]))
_, expirationTime1, ok := c.seenAtt.GetWithExpiration(id.String())
require.Equal(t, true, ok)
// Make sure that duplicates are not inserted, but expiration time gets updated.
require.NoError(t, c.insertSeenBit(att1))
require.Equal(t, 1, c.seenAtt.ItemCount())
_, expirationprysmTime, ok := c.seenAtt.GetWithExpiration(string(r[:]))
_, expirationprysmTime, ok := c.seenAtt.GetWithExpiration(id.String())
require.Equal(t, true, ok)
require.Equal(t, true, expirationprysmTime.After(expirationTime1), "Expiration time is not updated")
}

View File

@@ -7,6 +7,8 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"go.opencensus.io/trace"
)
@@ -27,13 +29,14 @@ func (c *AttCaches) SaveUnaggregatedAttestation(att ethpb.Att) error {
return nil
}
r, err := hashFn(att)
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}
c.unAggregateAttLock.Lock()
defer c.unAggregateAttLock.Unlock()
c.unAggregatedAtt[r] = att.Copy()
c.unAggregatedAtt[id] = att
return nil
}
@@ -69,19 +72,56 @@ func (c *AttCaches) UnaggregatedAttestations() ([]ethpb.Att, error) {
// UnaggregatedAttestationsBySlotIndex returns the unaggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) UnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []ethpb.Att {
func (c *AttCaches) UnaggregatedAttestationsBySlotIndex(
ctx context.Context,
slot primitives.Slot,
committeeIndex primitives.CommitteeIndex,
) []*ethpb.Attestation {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.UnaggregatedAttestationsBySlotIndex")
defer span.End()
atts := make([]ethpb.Att, 0)
atts := make([]*ethpb.Attestation, 0)
c.unAggregateAttLock.RLock()
defer c.unAggregateAttLock.RUnlock()
unAggregatedAtts := c.unAggregatedAtt
for _, a := range unAggregatedAtts {
if slot == a.GetData().Slot && committeeIndex == a.GetData().CommitteeIndex {
atts = append(atts, a)
if a.Version() == version.Phase0 && slot == a.GetData().Slot && committeeIndex == a.GetData().CommitteeIndex {
att, ok := a.(*ethpb.Attestation)
// This will never fail in practice because we asserted the version
if ok {
atts = append(atts, att)
}
}
}
return atts
}
// UnaggregatedAttestationsBySlotIndexElectra returns the unaggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) UnaggregatedAttestationsBySlotIndexElectra(
ctx context.Context,
slot primitives.Slot,
committeeIndex primitives.CommitteeIndex,
) []*ethpb.AttestationElectra {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.UnaggregatedAttestationsBySlotIndexElectra")
defer span.End()
atts := make([]*ethpb.AttestationElectra, 0)
c.unAggregateAttLock.RLock()
defer c.unAggregateAttLock.RUnlock()
unAggregatedAtts := c.unAggregatedAtt
for _, a := range unAggregatedAtts {
if a.Version() == version.Electra && slot == a.GetData().Slot && a.CommitteeBitsVal().BitAt(uint64(committeeIndex)) {
att, ok := a.(*ethpb.AttestationElectra)
// This will never fail in practice because we asserted the version
if ok {
atts = append(atts, att)
}
}
}
@@ -101,14 +141,14 @@ func (c *AttCaches) DeleteUnaggregatedAttestation(att ethpb.Att) error {
return err
}
r, err := hashFn(att)
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}
c.unAggregateAttLock.Lock()
defer c.unAggregateAttLock.Unlock()
delete(c.unAggregatedAtt, r)
delete(c.unAggregatedAtt, id)
return nil
}

View File

@@ -7,10 +7,11 @@ import (
"testing"
c "github.com/patrickmn/go-cache"
fssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/go-bitfield"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
@@ -39,7 +40,7 @@ func TestKV_Unaggregated_SaveUnaggregatedAttestation(t *testing.T) {
BeaconBlockRoot: []byte{0b0},
},
},
wantErrString: fssz.ErrBytesLength.Error(),
wantErrString: "could not create attestation ID",
},
{
name: "normal save",
@@ -57,13 +58,13 @@ func TestKV_Unaggregated_SaveUnaggregatedAttestation(t *testing.T) {
count: 0,
},
}
r, err := hashFn(util.HydrateAttestationData(&ethpb.AttestationData{Slot: 100}))
id, err := attestation.NewId(util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100}}), attestation.Data)
require.NoError(t, err)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cache := NewAttCaches()
cache.seenAtt.Set(string(r[:]), []bitfield.Bitlist{{0xff}}, c.DefaultExpiration)
cache.seenAtt.Set(id.String(), []bitfield.Bitlist{{0xff}}, c.DefaultExpiration)
assert.Equal(t, 0, len(cache.unAggregatedAtt), "Invalid start pool, atts: %d", len(cache.unAggregatedAtt))
if tt.att != nil && tt.att.GetSignature() == nil {
@@ -246,9 +247,35 @@ func TestKV_Unaggregated_UnaggregatedAttestationsBySlotIndex(t *testing.T) {
}
ctx := context.Background()
returned := cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 1)
assert.DeepEqual(t, []ethpb.Att{att1}, returned)
assert.DeepEqual(t, []*ethpb.Attestation{att1}, returned)
returned = cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 2)
assert.DeepEqual(t, []ethpb.Att{att2}, returned)
assert.DeepEqual(t, []*ethpb.Attestation{att2}, returned)
returned = cache.UnaggregatedAttestationsBySlotIndex(ctx, 2, 1)
assert.DeepEqual(t, []ethpb.Att{att3}, returned)
assert.DeepEqual(t, []*ethpb.Attestation{att3}, returned)
}
func TestKV_Unaggregated_UnaggregatedAttestationsBySlotIndexElectra(t *testing.T) {
cache := NewAttCaches()
committeeBits := primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(1, true)
att1 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b101}, CommitteeBits: committeeBits})
committeeBits = primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(2, true)
att2 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b110}, CommitteeBits: committeeBits})
committeeBits = primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(1, true)
att3 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110}, CommitteeBits: committeeBits})
atts := []*ethpb.AttestationElectra{att1, att2, att3}
for _, att := range atts {
require.NoError(t, cache.SaveUnaggregatedAttestation(att))
}
ctx := context.Background()
returned := cache.UnaggregatedAttestationsBySlotIndexElectra(ctx, 1, 1)
assert.DeepEqual(t, []*ethpb.AttestationElectra{att1}, returned)
returned = cache.UnaggregatedAttestationsBySlotIndexElectra(ctx, 1, 2)
assert.DeepEqual(t, []*ethpb.AttestationElectra{att2}, returned)
returned = cache.UnaggregatedAttestationsBySlotIndexElectra(ctx, 2, 1)
assert.DeepEqual(t, []*ethpb.AttestationElectra{att3}, returned)
}

View File

@@ -18,7 +18,8 @@ type Pool interface {
SaveAggregatedAttestation(att ethpb.Att) error
SaveAggregatedAttestations(atts []ethpb.Att) error
AggregatedAttestations() []ethpb.Att
AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []ethpb.Att
AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.Attestation
AggregatedAttestationsBySlotIndexElectra(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.AttestationElectra
DeleteAggregatedAttestation(att ethpb.Att) error
HasAggregatedAttestation(att ethpb.Att) (bool, error)
AggregatedAttestationCount() int
@@ -26,7 +27,8 @@ type Pool interface {
SaveUnaggregatedAttestation(att ethpb.Att) error
SaveUnaggregatedAttestations(atts []ethpb.Att) error
UnaggregatedAttestations() ([]ethpb.Att, error)
UnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []ethpb.Att
UnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.Attestation
UnaggregatedAttestationsBySlotIndexElectra(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.AttestationElectra
DeleteUnaggregatedAttestation(att ethpb.Att) error
DeleteSeenUnaggregatedAttestations() (int, error)
UnaggregatedAttestationCount() int

View File

@@ -3,14 +3,14 @@ package attestations
import (
"bytes"
"context"
"errors"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
attaggregation "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation/aggregation/attestations"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
@@ -67,7 +67,7 @@ func (s *Service) batchForkChoiceAtts(ctx context.Context) error {
atts := append(s.cfg.Pool.AggregatedAttestations(), s.cfg.Pool.BlockAttestations()...)
atts = append(atts, s.cfg.Pool.ForkchoiceAttestations()...)
attsByDataRoot := make(map[[32]byte][]ethpb.Att, len(atts))
attsByVerAndDataRoot := make(map[attestation.Id][]ethpb.Att, len(atts))
// Consolidate attestations by aggregating them by similar data root.
for _, att := range atts {
@@ -79,14 +79,14 @@ func (s *Service) batchForkChoiceAtts(ctx context.Context) error {
continue
}
attDataRoot, err := att.GetData().HashTreeRoot()
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return err
return errors.Wrap(err, "could not create attestation ID")
}
attsByDataRoot[attDataRoot] = append(attsByDataRoot[attDataRoot], att)
attsByVerAndDataRoot[id] = append(attsByVerAndDataRoot[id], att)
}
for _, atts := range attsByDataRoot {
for _, atts := range attsByVerAndDataRoot {
if err := s.aggregateAndSaveForkChoiceAtts(atts); err != nil {
return err
}
@@ -119,12 +119,12 @@ func (s *Service) aggregateAndSaveForkChoiceAtts(atts []ethpb.Att) error {
// This checks if the attestation has previously been aggregated for fork choice
// return true if yes, false if no.
func (s *Service) seen(att ethpb.Att) (bool, error) {
attRoot, err := hash.Proto(att.GetData())
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return false, err
return false, errors.Wrap(err, "could not create attestation ID")
}
incomingBits := att.GetAggregationBits()
savedBits, ok := s.forkChoiceProcessedRoots.Get(attRoot)
savedBits, ok := s.forkChoiceProcessedAtts.Get(id)
if ok {
savedBitlist, ok := savedBits.(bitfield.Bitlist)
if !ok {
@@ -149,6 +149,6 @@ func (s *Service) seen(att ethpb.Att) (bool, error) {
}
}
s.forkChoiceProcessedRoots.Add(attRoot, incomingBits)
s.forkChoiceProcessedAtts.Add(id, incomingBits)
return false, nil
}

View File

@@ -13,16 +13,16 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
)
var forkChoiceProcessedRootsSize = 1 << 16
var forkChoiceProcessedAttsSize = 1 << 16
// Service of attestation pool operations.
type Service struct {
cfg *Config
ctx context.Context
cancel context.CancelFunc
err error
forkChoiceProcessedRoots *lru.Cache
genesisTime uint64
cfg *Config
ctx context.Context
cancel context.CancelFunc
err error
forkChoiceProcessedAtts *lru.Cache
genesisTime uint64
}
// Config options for the service.
@@ -35,7 +35,7 @@ type Config struct {
// NewService instantiates a new attestation pool service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
cache := lruwrpr.New(forkChoiceProcessedRootsSize)
cache := lruwrpr.New(forkChoiceProcessedAttsSize)
if cfg.pruneInterval == 0 {
// Prune expired attestations from the pool every slot interval.
@@ -44,10 +44,10 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
return &Service{
cfg: cfg,
ctx: ctx,
cancel: cancel,
forkChoiceProcessedRoots: cache,
cfg: cfg,
ctx: ctx,
cancel: cancel,
forkChoiceProcessedAtts: cache,
}, nil
}

View File

@@ -336,7 +336,7 @@ func TestService_InterceptAddrDial_Public(t *testing.T) {
}),
}
var err error
//test with public filter
// test with public filter
cidr := "public"
ip := "212.67.10.122"
s.addrFilter, err = configureFilter(&Config{AllowListCIDR: cidr})
@@ -348,7 +348,7 @@ func TestService_InterceptAddrDial_Public(t *testing.T) {
t.Errorf("Expected multiaddress with ip %s to not be rejected since we allow public addresses", ip)
}
ip = "192.168.1.0" //this is private and should fail
ip = "192.168.1.0" // this is private and should fail
multiAddress, err = ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ip, 3000))
require.NoError(t, err)
valid = s.InterceptAddrDial("", multiAddress)
@@ -356,7 +356,7 @@ func TestService_InterceptAddrDial_Public(t *testing.T) {
t.Errorf("Expected multiaddress with ip %s to be rejected since we are only allowing public addresses", ip)
}
//test with public allow filter, with a public address added to the deny list
// test with public allow filter, with a public address added to the deny list
invalidPublicIp := "212.67.10.122"
validPublicIp := "91.65.69.69"
s.addrFilter, err = configureFilter(&Config{AllowListCIDR: "public", DenyListCIDR: []string{"212.67.89.112/16"}})
@@ -384,7 +384,7 @@ func TestService_InterceptAddrDial_Private(t *testing.T) {
}),
}
var err error
//test with private filter
// test with private filter
cidr := "private"
s.addrFilter, err = configureFilter(&Config{DenyListCIDR: []string{cidr}})
require.NoError(t, err)
@@ -413,7 +413,7 @@ func TestService_InterceptAddrDial_AllowPrivate(t *testing.T) {
}),
}
var err error
//test with private filter
// test with private filter
cidr := "private"
s.addrFilter, err = configureFilter(&Config{AllowListCIDR: cidr})
require.NoError(t, err)
@@ -442,7 +442,7 @@ func TestService_InterceptAddrDial_DenyPublic(t *testing.T) {
}),
}
var err error
//test with private filter
// test with private filter
cidr := "public"
s.addrFilter, err = configureFilter(&Config{DenyListCIDR: []string{cidr}})
require.NoError(t, err)
@@ -471,7 +471,7 @@ func TestService_InterceptAddrDial_AllowConflict(t *testing.T) {
}),
}
var err error
//test with private filter
// test with private filter
cidr := "public"
s.addrFilter, err = configureFilter(&Config{DenyListCIDR: []string{cidr, "192.168.0.0/16"}})
require.NoError(t, err)

View File
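
The tests above drive allow/deny filtering by CIDR, including the special "public" and "private" keywords. The keyword handling is Prysm-specific, but the underlying membership test is plain stdlib; a minimal sketch:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Build the private range used in the deny list and test a private and a public address against it.
	_, privateNet, err := net.ParseCIDR("192.168.0.0/16")
	if err != nil {
		panic(err)
	}
	for _, ip := range []string{"192.168.1.0", "212.67.10.122"} {
		fmt.Printf("%s in 192.168.0.0/16: %v\n", ip, privateNet.Contains(net.ParseIP(ip)))
	}
}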

@@ -51,7 +51,7 @@ func (quicProtocol) ENRKey() string { return "quic" }
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
func (s *Service) RefreshENR() {
// return early if discv5 isnt running
// return early if discv5 isn't running
if s.dv5Listener == nil || !s.isInitialized() {
return
}

View File

@@ -27,7 +27,8 @@ var gossipTopicMappings = map[string]proto.Message{
// GossipTopicMappings is a function to return the assigned data type
// versioned by epoch.
func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
if topic == BlockSubnetTopicFormat {
switch topic {
case BlockSubnetTopicFormat:
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.SignedBeaconBlockElectra{}
}
@@ -43,8 +44,25 @@ func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
if epoch >= params.BeaconConfig().AltairForkEpoch {
return &ethpb.SignedBeaconBlockAltair{}
}
return gossipTopicMappings[topic]
case AttestationSubnetTopicFormat:
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.AttestationElectra{}
}
return gossipTopicMappings[topic]
case AttesterSlashingSubnetTopicFormat:
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.AttesterSlashingElectra{}
}
return gossipTopicMappings[topic]
case AggregateAndProofSubnetTopicFormat:
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.SignedAggregateAttestationAndProofElectra{}
}
return gossipTopicMappings[topic]
default:
return gossipTopicMappings[topic]
}
return gossipTopicMappings[topic]
}
// AllTopics returns all topics stored in our
@@ -75,4 +93,7 @@ func init() {
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockDeneb{})] = BlockSubnetTopicFormat
// Specially handle Electra objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockElectra{})] = BlockSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.AttestationElectra{})] = AttestationSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.AttesterSlashingElectra{})] = AttesterSlashingSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedAggregateAttestationAndProofElectra{})] = AggregateAndProofSubnetTopicFormat
}

View File
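
The new switch versions four gossip topics by epoch rather than only the block topic. A compact stand-alone sketch of the same dispatch shape, with placeholder names (none of these identifiers are Prysm's):

package main

import "fmt"

type epoch uint64

const electraForkEpoch epoch = 500 // illustrative value only

// topicMessage picks a prototype message name for a topic, upgrading the type once the
// Electra fork epoch is reached, the same shape as the switch in the diff.
func topicMessage(topic string, e epoch) string {
	switch topic {
	case "attestation":
		if e >= electraForkEpoch {
			return "AttestationElectra"
		}
		return "Attestation"
	case "aggregate_and_proof":
		if e >= electraForkEpoch {
			return "SignedAggregateAttestationAndProofElectra"
		}
		return "SignedAggregateAttestationAndProof"
	default:
		return "unknown topic"
	}
}

func main() {
	fmt.Println(topicMessage("attestation", 499)) // Attestation
	fmt.Println(topicMessage("attestation", 500)) // AttestationElectra
}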

@@ -22,20 +22,20 @@ func TestMappingHasNoDuplicates(t *testing.T) {
}
}
func TestGossipTopicMappings_CorrectBlockType(t *testing.T) {
func TestGossipTopicMappings_CorrectType(t *testing.T) {
params.SetupTestConfigCleanup(t)
bCfg := params.BeaconConfig().Copy()
altairForkEpoch := primitives.Epoch(100)
BellatrixForkEpoch := primitives.Epoch(200)
CapellaForkEpoch := primitives.Epoch(300)
DenebForkEpoch := primitives.Epoch(400)
ElectraForkEpoch := primitives.Epoch(500)
bellatrixForkEpoch := primitives.Epoch(200)
capellaForkEpoch := primitives.Epoch(300)
denebForkEpoch := primitives.Epoch(400)
electraForkEpoch := primitives.Epoch(500)
bCfg.AltairForkEpoch = altairForkEpoch
bCfg.BellatrixForkEpoch = BellatrixForkEpoch
bCfg.CapellaForkEpoch = CapellaForkEpoch
bCfg.DenebForkEpoch = DenebForkEpoch
bCfg.ElectraForkEpoch = ElectraForkEpoch
bCfg.BellatrixForkEpoch = bellatrixForkEpoch
bCfg.CapellaForkEpoch = capellaForkEpoch
bCfg.DenebForkEpoch = denebForkEpoch
bCfg.ElectraForkEpoch = electraForkEpoch
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = primitives.Epoch(100)
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.BellatrixForkVersion)] = primitives.Epoch(200)
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.CapellaForkVersion)] = primitives.Epoch(300)
@@ -47,29 +47,83 @@ func TestGossipTopicMappings_CorrectBlockType(t *testing.T) {
pMessage := GossipTopicMappings(BlockSubnetTopicFormat, 0)
_, ok := pMessage.(*ethpb.SignedBeaconBlock)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, 0)
_, ok = pMessage.(*ethpb.Attestation)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, 0)
_, ok = pMessage.(*ethpb.AttesterSlashing)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, 0)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
// Altair Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, altairForkEpoch)
_, ok = pMessage.(*ethpb.SignedBeaconBlockAltair)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, altairForkEpoch)
_, ok = pMessage.(*ethpb.Attestation)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, altairForkEpoch)
_, ok = pMessage.(*ethpb.AttesterSlashing)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, altairForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
// Bellatrix Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, BellatrixForkEpoch)
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, bellatrixForkEpoch)
_, ok = pMessage.(*ethpb.SignedBeaconBlockBellatrix)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, bellatrixForkEpoch)
_, ok = pMessage.(*ethpb.Attestation)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, bellatrixForkEpoch)
_, ok = pMessage.(*ethpb.AttesterSlashing)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, bellatrixForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
// Capella Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, CapellaForkEpoch)
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, capellaForkEpoch)
_, ok = pMessage.(*ethpb.SignedBeaconBlockCapella)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, capellaForkEpoch)
_, ok = pMessage.(*ethpb.Attestation)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, capellaForkEpoch)
_, ok = pMessage.(*ethpb.AttesterSlashing)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, capellaForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
// Deneb Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, DenebForkEpoch)
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, denebForkEpoch)
_, ok = pMessage.(*ethpb.SignedBeaconBlockDeneb)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, denebForkEpoch)
_, ok = pMessage.(*ethpb.Attestation)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, denebForkEpoch)
_, ok = pMessage.(*ethpb.AttesterSlashing)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, denebForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
// Electra Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, ElectraForkEpoch)
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.SignedBeaconBlockElectra)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.AttestationElectra)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.AttesterSlashingElectra)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProofElectra)
assert.Equal(t, true, ok)
}

View File

@@ -81,7 +81,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
bootNodeENR := bootListener.Self().String()
// Create 3 nodes, each subscribed to a different subnet.
// Each node is connected to the boostrap node.
// Each node is connected to the bootstrap node.
services := make([]*Service, 0, 3)
for i := 1; i <= 3; i++ {

View File

@@ -43,6 +43,7 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",

View File

@@ -26,7 +26,13 @@ var (
BlockMap map[[4]byte]func() (interfaces.ReadOnlySignedBeaconBlock, error)
// MetaDataMap maps the fork-version to the underlying data type for that
// particular fork period.
MetaDataMap map[[4]byte]func() metadata.Metadata
MetaDataMap map[[4]byte]func() (metadata.Metadata, error)
// AttestationMap maps the fork-version to the underlying data type for that
// particular fork period.
AttestationMap map[[4]byte]func() (ethpb.Att, error)
// AggregateAttestationMap maps the fork-version to the underlying data type for that
// particular fork period.
AggregateAttestationMap map[[4]byte]func() (ethpb.SignedAggregateAttAndProof, error)
)
// InitializeDataMaps initializes all the relevant object maps. This function is called to
@@ -67,24 +73,68 @@ func InitializeDataMaps() {
}
// Reset our metadata map.
MetaDataMap = map[[4]byte]func() metadata.Metadata{
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV0(&ethpb.MetaDataV0{})
MetaDataMap = map[[4]byte]func() (metadata.Metadata, error){
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV0(&ethpb.MetaDataV0{}), nil
},
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}), nil
},
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}), nil
},
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}), nil
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}), nil
},
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}), nil
},
}
// Reset our attestation map.
AttestationMap = map[[4]byte]func() (ethpb.Att, error){
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (ethpb.Att, error) {
return &ethpb.Attestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (ethpb.Att, error) {
return &ethpb.Attestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (ethpb.Att, error) {
return &ethpb.Attestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (ethpb.Att, error) {
return &ethpb.Attestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (ethpb.Att, error) {
return &ethpb.Attestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.Att, error) {
return &ethpb.AttestationElectra{}, nil
},
}
// Reset our aggregate attestation map.
AggregateAttestationMap = map[[4]byte]func() (ethpb.SignedAggregateAttAndProof, error){
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProof{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProof{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProof{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProof{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProof{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProofElectra{}, nil
},
}
}

View File
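
All three maps above share one shape: a fork version ([4]byte) mapped to a constructor returning (T, error). A minimal generic stand-in showing how such a registry can be declared and queried (the types and fork versions here are illustrative only):

package main

import "fmt"

type forkVersion [4]byte

// registry maps a fork version to a constructor for some message type T, mirroring the
// shape of MetaDataMap, AttestationMap and AggregateAttestationMap in the diff.
type registry[T any] map[forkVersion]func() (T, error)

type att interface{ Version() int }

type phase0Att struct{}
type electraAtt struct{}

func (phase0Att) Version() int  { return 0 }
func (electraAtt) Version() int { return 5 }

func main() {
	attestations := registry[att]{
		forkVersion{0, 0, 0, 0}: func() (att, error) { return phase0Att{}, nil },
		forkVersion{5, 0, 0, 0}: func() (att, error) { return electraAtt{}, nil },
	}
	ctor, ok := attestations[forkVersion{5, 0, 0, 0}]
	if !ok {
		panic("no constructor for fork version")
	}
	a, err := ctor()
	if err != nil {
		panic(err)
	}
	fmt.Println(a.Version()) // 5
}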

@@ -5,7 +5,9 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestInitializeDataMaps(t *testing.T) {
@@ -44,8 +46,36 @@ func TestInitializeDataMaps(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.action()
_, ok := BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
bFunc, ok := BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
assert.Equal(t, tt.exists, ok)
if tt.exists {
b, err := bFunc()
require.NoError(t, err)
generic, err := b.PbGenericBlock()
require.NoError(t, err)
assert.NotNil(t, generic.GetPhase0())
}
mdFunc, ok := MetaDataMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if tt.exists {
md, err := mdFunc()
require.NoError(t, err)
assert.NotNil(t, md.MetadataObjV0())
}
assert.Equal(t, tt.exists, ok)
attFunc, ok := AttestationMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if tt.exists {
att, err := attFunc()
require.NoError(t, err)
assert.Equal(t, version.Phase0, att.Version())
}
assert.Equal(t, tt.exists, ok)
aggFunc, ok := AggregateAttestationMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
assert.Equal(t, tt.exists, ok)
if tt.exists {
agg, err := aggFunc()
require.NoError(t, err)
assert.Equal(t, version.Phase0, agg.Version())
}
})
}
}

View File

@@ -70,7 +70,7 @@ func (s *Service) endpoints(
endpoints = append(endpoints, s.eventsEndpoints()...)
endpoints = append(endpoints, s.prysmBeaconEndpoints(ch, stater)...)
endpoints = append(endpoints, s.prysmNodeEndpoints()...)
endpoints = append(endpoints, s.prysmValidatorEndpoints(coreService, stater)...)
endpoints = append(endpoints, s.prysmValidatorEndpoints(coreService)...)
if enableDebug {
endpoints = append(endpoints, s.debugEndpoints(stater)...)
}
@@ -143,7 +143,7 @@ func (s *Service) builderEndpoints(stater lookup.Stater) []endpoint {
}
}
func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
func (*Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
server := &blob.Server{
Blocker: blocker,
}
@@ -777,7 +777,7 @@ func (s *Service) beaconEndpoints(
}
}
func (s *Service) configEndpoints() []endpoint {
func (*Service) configEndpoints() []endpoint {
const namespace = "config"
return []endpoint{
{
@@ -1045,7 +1045,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
}
}
func (s *Service) prysmValidatorEndpoints(coreService *core.Service, stater lookup.Stater) []endpoint {
func (*Service) prysmValidatorEndpoints(coreService *core.Service) []endpoint {
server := &validatorprysm.Server{
CoreService: coreService,
}

View File

@@ -5,6 +5,7 @@ go_library(
srcs = [
"handlers.go",
"handlers_block.go",
"log.go",
"server.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/validator",

View File

@@ -31,7 +31,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/network/httputil"
ethpbalpha "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -592,7 +592,7 @@ func (s *Server) PrepareBeaconProposer(w http.ResponseWriter, r *http.Request) {
if len(validatorIndices) == 0 {
return
}
log.WithFields(log.Fields{
log.WithFields(logrus.Fields{
"validatorIndices": validatorIndices,
}).Info("Updated fee recipient addresses")
}

View File

@@ -220,8 +220,9 @@ func (s *Server) produceBlockV3(ctx context.Context, w http.ResponseWriter, r *h
consensusBlockValue, httpError := getConsensusBlockValue(ctx, s.BlockRewardFetcher, v1alpha1resp.Block)
if httpError != nil {
httputil.WriteError(w, httpError)
return
log.WithError(httpError).Debug("Failed to get consensus block value")
// Having the consensus block value is not critical to block production
consensusBlockValue = ""
}
w.Header().Set(api.ExecutionPayloadBlindedHeader, fmt.Sprintf("%v", v1alpha1resp.IsBlinded))
@@ -297,7 +298,7 @@ func getConsensusBlockValue(ctx context.Context, blockRewardsFetcher rewards.Blo
}
}
if bb.Version() == version.Phase0 {
// ignore for phase 0
// Getting the block value for Phase 0 is very hard, so we ignore it
return "", nil
}
// Get consensus payload value which is the same as the total from the block rewards api.

View File
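
Rather than failing block production when the consensus block value cannot be computed, the handler now logs at debug level and falls back to an empty string. A small sketch of that degrade-gracefully pattern, with a hypothetical helper standing in for the rewards lookup:

package main

import (
	"errors"
	"fmt"
	"log"
)

// fetchConsensusBlockValue is a hypothetical stand-in for a rewards lookup that can fail
// for reasons unrelated to block production itself.
func fetchConsensusBlockValue() (string, error) {
	return "", errors.New("rewards service unavailable")
}

func main() {
	value, err := fetchConsensusBlockValue()
	if err != nil {
		// The value is not critical, so log and continue with an empty string
		// instead of aborting the whole response.
		log.Printf("failed to get consensus block value: %v", err)
		value = ""
	}
	fmt.Printf("consensus_block_value=%q\n", value)
}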

@@ -0,0 +1,5 @@
package validator
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "beacon-api")

View File
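
The new log.go gives the package its own prefixed logrus entry, which the handlers then decorate with structured fields per call. A short sketch of that pattern using logrus directly (the prefix value mirrors the diff; the field contents are illustrative):

package main

import "github.com/sirupsen/logrus"

// Package-level entry with a fixed prefix, as in the new log.go.
var log = logrus.WithField("prefix", "beacon-api")

func main() {
	validatorIndices := []uint64{1, 7, 42} // illustrative values
	log.WithFields(logrus.Fields{
		"validatorIndices": validatorIndices,
	}).Info("Updated fee recipient addresses")
}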

@@ -2,7 +2,6 @@ package validator
import (
"context"
"fmt"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
@@ -100,12 +99,8 @@ func (vs *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.
best = aggregatedAtt
}
}
att, ok := best.(*ethpb.Attestation)
if !ok {
return nil, fmt.Errorf("best attestation has wrong type (expected %T, got %T)", &ethpb.Attestation{}, best)
}
a := &ethpb.AggregateAttestationAndProof{
Aggregate: att,
Aggregate: best,
SelectionProof: req.SlotSignature,
AggregatorIndex: validatorIndex,
}

View File

@@ -4,7 +4,6 @@ package rpc
import (
"context"
"fmt"
"net"
"sync"
@@ -154,7 +153,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
connectedRPCClients: make(map[net.Addr]bool),
}
address := fmt.Sprintf("%s:%s", s.cfg.Host, s.cfg.Port)
address := net.JoinHostPort(s.cfg.Host, s.cfg.Port)
lis, err := net.Listen("tcp", address)
if err != nil {
log.WithError(err).Errorf("Could not listen to port in Start() %s", address)

View File
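
net.JoinHostPort is preferred over fmt.Sprintf("%s:%s", ...) because it brackets IPv6 literals correctly. A quick stdlib illustration:

package main

import (
	"fmt"
	"net"
)

func main() {
	fmt.Println(net.JoinHostPort("127.0.0.1", "4000")) // 127.0.0.1:4000
	// IPv6 hosts get wrapped in brackets, which a plain Sprintf would miss.
	fmt.Println(net.JoinHostPort("::1", "4000")) // [::1]:4000
}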

@@ -141,7 +141,7 @@ func (s *Service) processAttestations(
start := time.Now()
// Check for attestatinos slashings (double, sourrounding, surrounded votes).
// Check for attestations slashings (double, surrounding, surrounded votes).
slashings, err := s.checkSlashableAttestations(ctx, currentEpoch, validAttestations)
if err != nil {
log.WithError(err).Error(couldNotCheckSlashableAtt)

View File

@@ -16,7 +16,6 @@ go_library(
"pending_consolidations_root.go",
"pending_partial_withdrawals_root.go",
"reference.go",
"slice_root.go",
"sync_committee.root.go",
"trie_helpers.go",
"unrealized_justification.go",

View File

@@ -2,9 +2,10 @@ package stateutil
import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func HistoricalSummariesRoot(summaries []*ethpb.HistoricalSummary) ([32]byte, error) {
return SliceRoot(summaries, fieldparams.HistoricalRootsLength)
return ssz.SliceRoot(summaries, fieldparams.HistoricalRootsLength)
}

View File

@@ -2,9 +2,10 @@ package stateutil
import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func PendingBalanceDepositsRoot(slice []*ethpb.PendingBalanceDeposit) ([32]byte, error) {
return SliceRoot(slice, fieldparams.PendingBalanceDepositsLimit)
return ssz.SliceRoot(slice, fieldparams.PendingBalanceDepositsLimit)
}

View File

@@ -2,9 +2,10 @@ package stateutil
import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func PendingConsolidationsRoot(slice []*ethpb.PendingConsolidation) ([32]byte, error) {
return SliceRoot(slice, fieldparams.PendingConsolidationsLimit)
return ssz.SliceRoot(slice, fieldparams.PendingConsolidationsLimit)
}

View File

@@ -2,9 +2,10 @@ package stateutil
import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func PendingPartialWithdrawalsRoot(slice []*ethpb.PendingPartialWithdrawal) ([32]byte, error) {
return SliceRoot(slice, fieldparams.PendingPartialWithdrawalsLimit)
return ssz.SliceRoot(slice, fieldparams.PendingPartialWithdrawalsLimit)
}

View File
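
The four stateutil helpers above now delegate to ssz.SliceRoot, which, going by the calls shown, hashes a bounded slice of SSZ objects into a single [32]byte root. That helper is Prysm-internal, so the following is only a self-contained sketch of the (slice, limit) -> root shape, not the SSZ hash-tree-root algorithm:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// sliceRootSketch only illustrates the (slice, limit) -> [32]byte shape: hash each element,
// fold the hashes together, and mix in the declared limit. It is NOT SSZ hash-tree-root.
func sliceRootSketch(elements [][]byte, limit uint64) [32]byte {
	acc := make([]byte, 0, 32*len(elements)+8)
	for _, el := range elements {
		h := sha256.Sum256(el)
		acc = append(acc, h[:]...)
	}
	var lim [8]byte
	binary.LittleEndian.PutUint64(lim[:], limit)
	acc = append(acc, lim[:]...)
	return sha256.Sum256(acc)
}

func main() {
	root := sliceRootSketch([][]byte{[]byte("a"), []byte("b")}, 1<<24)
	fmt.Printf("%x\n", root)
}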

@@ -44,6 +44,7 @@ go_library(
"validate_aggregate_proof.go",
"validate_attester_slashing.go",
"validate_beacon_attestation.go",
"validate_beacon_attestation_electra.go",
"validate_beacon_blocks.go",
"validate_blob.go",
"validate_bls_to_execution_change.go",
@@ -160,7 +161,6 @@ go_test(
"rpc_beacon_blocks_by_root_test.go",
"rpc_blob_sidecars_by_range_test.go",
"rpc_blob_sidecars_by_root_test.go",
"rpc_chunked_response_test.go",
"rpc_goodbye_test.go",
"rpc_handler_test.go",
"rpc_metadata_test.go",
@@ -177,6 +177,7 @@ go_test(
"sync_test.go",
"validate_aggregate_proof_test.go",
"validate_attester_slashing_test.go",
"validate_beacon_attestation_electra_test.go",
"validate_beacon_attestation_test.go",
"validate_beacon_blocks_test.go",
"validate_blob_test.go",

View File

@@ -1,13 +1,20 @@
package sync
import (
"fmt"
"reflect"
"strings"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"google.golang.org/protobuf/proto"
)
@@ -50,11 +57,12 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
return nil, errors.Errorf("message of %T does not support marshaller interface", base)
}
// Handle different message types across forks.
if topic == p2p.BlockSubnetTopicFormat {
m, err = extractBlockDataType(fDigest[:], s.cfg.clock)
if err != nil {
return nil, err
}
dt, err := extractValidDataTypeFromTopic(topic, fDigest[:], s.cfg.clock)
if err != nil {
return nil, err
}
if dt != nil {
m = dt
}
if err := s.cfg.p2p.Encoding().DecodeGossip(msg.Data, m); err != nil {
return nil, err
@@ -63,7 +71,7 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
}
// Replaces our fork digest with the formatter.
func (_ *Service) replaceForkDigest(topic string) (string, error) {
func (*Service) replaceForkDigest(topic string) (string, error) {
subStrings := strings.Split(topic, "/")
if len(subStrings) != 4 {
return "", errInvalidTopic
@@ -71,3 +79,48 @@ func (_ *Service) replaceForkDigest(topic string) (string, error) {
subStrings[2] = "%x"
return strings.Join(subStrings, "/"), nil
}
func extractValidDataTypeFromTopic(topic string, digest []byte, clock *startup.Clock) (ssz.Unmarshaler, error) {
switch topic {
case p2p.BlockSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.BlockMap, digest, clock)
case p2p.AttestationSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.AttestationMap, digest, clock)
case p2p.AggregateAndProofSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.AggregateAttestationMap, digest, clock)
}
return nil, nil
}
func extractDataTypeFromTypeMap[T any](typeMap map[[4]byte]func() (T, error), digest []byte, tor blockchain.TemporalOracle) (T, error) {
var zero T
if len(digest) == 0 {
f, ok := typeMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if !ok {
return zero, fmt.Errorf("no %T type exists for the genesis fork version", zero)
}
return f()
}
if len(digest) != forkDigestLength {
return zero, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := tor.GenesisValidatorsRoot()
for k, f := range typeMap {
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
if err != nil {
return zero, err
}
if rDigest == bytesutil.ToBytes4(digest) {
return f()
}
}
return zero, errors.Wrapf(
ErrNoValidDigest,
"could not extract %T data type, saw digest=%#x, genesis=%v, vr=%#x",
zero,
digest,
tor.GenesisTime(),
tor.GenesisValidatorsRoot(),
)
}

View File
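
extractDataTypeFromTypeMap collapses several near-identical helpers into one generic lookup keyed by fork digest. A self-contained sketch of that generic pattern; the digest computation below is faked with SHA-256, whereas Prysm derives it from the fork version and the genesis validators root:

package main

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
)

// digestFor is a stand-in for ComputeForkDigest; any deterministic 4-byte digest is
// enough to demonstrate the lookup.
func digestFor(forkVersion [4]byte) [4]byte {
	h := sha256.Sum256(forkVersion[:])
	var d [4]byte
	copy(d[:], h[:4])
	return d
}

// extractFromTypeMap mirrors the generic helper: walk the fork-version-keyed map,
// recompute each digest, and call the constructor whose digest matches.
func extractFromTypeMap[T any](typeMap map[[4]byte]func() (T, error), digest []byte) (T, error) {
	var zero T
	if len(digest) != 4 {
		return zero, fmt.Errorf("invalid digest length %d, want 4", len(digest))
	}
	for version, ctor := range typeMap {
		d := digestFor(version)
		if bytes.Equal(d[:], digest) {
			return ctor()
		}
	}
	return zero, errors.New("no valid digest matched")
}

func main() {
	attMap := map[[4]byte]func() (string, error){
		[4]byte{0, 0, 0, 0}: func() (string, error) { return "Attestation", nil },
		[4]byte{5, 0, 0, 0}: func() (string, error) { return "AttestationElectra", nil },
	}
	d := digestFor([4]byte{5, 0, 0, 0})
	att, err := extractFromTypeMap(attMap, d[:])
	fmt.Println(att, err) // AttestationElectra <nil>
}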

@@ -11,15 +11,20 @@ import (
"github.com/d4l3k/messagediff"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2ptesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
@@ -109,3 +114,197 @@ func TestService_decodePubsubMessage(t *testing.T) {
})
}
}
func TestExtractDataType(t *testing.T) {
// Precompute digests
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
bellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
capellaDigest, err := signing.ComputeForkDigest(params.BeaconConfig().CapellaForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
denebDigest, err := signing.ComputeForkDigest(params.BeaconConfig().DenebForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
electraDigest, err := signing.ComputeForkDigest(params.BeaconConfig().ElectraForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
type args struct {
digest []byte
chain blockchain.ChainInfoFetcher
}
tests := []struct {
name string
args args
wantBlock interfaces.ReadOnlySignedBeaconBlock
wantMd metadata.Metadata
wantAtt ethpb.Att
wantAggregate ethpb.SignedAggregateAttAndProof
wantErr bool
}{
{
name: "no digest",
args: args{
digest: []byte{},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV0(&ethpb.MetaDataV0{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "invalid digest",
args: args{
digest: []byte{0x00, 0x01},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: nil,
wantMd: nil,
wantAtt: nil,
wantAggregate: nil,
wantErr: true,
},
{
name: "non existent digest",
args: args{
digest: []byte{0x00, 0x01, 0x02, 0x03},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: nil,
wantMd: nil,
wantAtt: nil,
wantAggregate: nil,
wantErr: true,
},
{
name: "genesis fork version",
args: args{
digest: genDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
return wsb
}(),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "altair fork version",
args: args{
digest: altairDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{Body: &ethpb.BeaconBlockBodyAltair{}}})
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "bellatrix fork version",
args: args{
digest: bellatrixDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{Body: &ethpb.BeaconBlockBodyBellatrix{ExecutionPayload: &enginev1.ExecutionPayload{}}}})
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "capella fork version",
args: args{
digest: capellaDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockCapella{Block: &ethpb.BeaconBlockCapella{Body: &ethpb.BeaconBlockBodyCapella{ExecutionPayload: &enginev1.ExecutionPayloadCapella{}}}})
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "deneb fork version",
args: args{
digest: denebDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockDeneb{Block: &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{ExecutionPayload: &enginev1.ExecutionPayloadDeneb{}}}})
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "electra fork version",
args: args{
digest: electraDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockElectra{Block: &ethpb.BeaconBlockElectra{Body: &ethpb.BeaconBlockBodyElectra{ExecutionPayload: &enginev1.ExecutionPayloadElectra{}}}})
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.AttestationElectra{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProofElectra{},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotBlock, err := extractDataTypeFromTypeMap(types.BlockMap, tt.args.digest, tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("block: error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotBlock, tt.wantBlock) {
t.Errorf("block: got = %v, want %v", gotBlock, tt.wantBlock)
}
gotAtt, err := extractDataTypeFromTypeMap(types.AttestationMap, tt.args.digest, tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("attestation: error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotAtt, tt.wantAtt) {
t.Errorf("attestation: got = %v, want %v", gotAtt, tt.wantAtt)
}
gotAggregate, err := extractDataTypeFromTypeMap(types.AggregateAttestationMap, tt.args.digest, tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("aggregate: error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotAggregate, tt.wantAggregate) {
t.Errorf("aggregate: got = %v, want %v", gotAggregate, tt.wantAggregate)
}
})
}
}

View File

@@ -22,7 +22,7 @@ func NewRegularSyncFuzz(opts ...Option) *Service {
cancel: cancel,
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
}
r.rateLimiter = newRateLimiter(r.cfg.p2p)

View File

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -87,12 +88,13 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
}
func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb.SignedAggregateAttestationAndProof) {
func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.SignedAggregateAttAndProof) {
for _, signedAtt := range attestations {
att := signedAtt.Message
aggregate := signedAtt.AggregateAttestationAndProof().AggregateVal()
data := aggregate.GetData()
// The pending attestations can arrive in both aggregated and unaggregated forms,
// each form has distinct validation steps.
if helpers.IsAggregated(att.Aggregate) {
if helpers.IsAggregated(aggregate) {
// Save the pending aggregated attestation to the pool if it passes the aggregated
// validation steps.
valRes, err := s.validateAggregatedAtt(ctx, signedAtt)
@@ -101,11 +103,11 @@ func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb
}
aggValid := pubsub.ValidationAccept == valRes
if s.validateBlockInAttestation(ctx, signedAtt) && aggValid {
if err := s.cfg.attPool.SaveAggregatedAttestation(att.Aggregate); err != nil {
if err := s.cfg.attPool.SaveAggregatedAttestation(aggregate); err != nil {
log.WithError(err).Debug("Could not save aggregate attestation")
continue
}
s.setAggregatorIndexEpochSeen(att.Aggregate.Data.Target.Epoch, att.AggregatorIndex)
s.setAggregatorIndexEpochSeen(data.Target.Epoch, signedAtt.AggregateAttestationAndProof().GetAggregatorIndex())
// Broadcasting the signed attestation again once a node is able to process it.
if err := s.cfg.p2p.Broadcast(ctx, signedAtt); err != nil {
@@ -116,39 +118,39 @@ func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb
// This is an important validation before retrieving attestation pre state to defend against
// attestation's target intentionally reference checkpoint that's long ago.
// Verify current finalized checkpoint is an ancestor of the block defined by the attestation's beacon block root.
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(att.Aggregate.Data.BeaconBlockRoot)) {
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
log.WithError(blockchain.ErrNotDescendantOfFinalized).Debug("Could not verify finalized consistency")
continue
}
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, att.Aggregate); err != nil {
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, aggregate); err != nil {
log.WithError(err).Debug("Could not verify FFG consistency")
continue
}
preState, err := s.cfg.chain.AttestationTargetState(ctx, att.Aggregate.Data.Target)
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
if err != nil {
log.WithError(err).Debug("Could not retrieve attestation prestate")
continue
}
valid, err := s.validateUnaggregatedAttWithState(ctx, att.Aggregate, preState)
valid, err := s.validateUnaggregatedAttWithState(ctx, aggregate, preState)
if err != nil {
log.WithError(err).Debug("Pending unaggregated attestation failed validation")
continue
}
if valid == pubsub.ValidationAccept {
if err := s.cfg.attPool.SaveUnaggregatedAttestation(att.Aggregate); err != nil {
if err := s.cfg.attPool.SaveUnaggregatedAttestation(aggregate); err != nil {
log.WithError(err).Debug("Could not save unaggregated attestation")
continue
}
s.setSeenCommitteeIndicesSlot(att.Aggregate.Data.Slot, att.Aggregate.Data.CommitteeIndex, att.Aggregate.AggregationBits)
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, aggregate.GetAggregationBits())
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(att.Aggregate.Data.Slot))
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(data.Slot))
if err != nil {
log.WithError(err).Debug("Could not retrieve active validator count")
continue
}
// Broadcasting the signed attestation again once a node is able to process it.
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, signedAtt.Message.Aggregate), signedAtt.Message.Aggregate); err != nil {
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, aggregate), aggregate); err != nil {
log.WithError(err).Debug("Could not broadcast")
}
}
@@ -160,8 +162,8 @@ func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb
// root of the missing block. The value is the list of pending attestations
// that voted for that block root. The caller of this function is responsible
// for not sending repeated attestations to the pending queue.
func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof) {
root := bytesutil.ToBytes32(att.Message.Aggregate.Data.BeaconBlockRoot)
func (s *Service) savePendingAtt(att ethpb.SignedAggregateAttAndProof) {
root := bytesutil.ToBytes32(att.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)
s.pendingAttsLock.Lock()
defer s.pendingAttsLock.Unlock()
@@ -178,7 +180,7 @@ func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof)
_, ok := s.blkRootToPendingAtts[root]
if !ok {
pendingAttCount.Inc()
s.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{att}
s.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{att}
return
}
// Skip if the attestation from the same aggregator already exists in
@@ -192,20 +194,32 @@ func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof)
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], att)
}
func attsAreEqual(a, b *ethpb.SignedAggregateAttestationAndProof) bool {
if a.Signature != nil {
return b.Signature != nil && a.Message.AggregatorIndex == b.Message.AggregatorIndex
func attsAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
if a.GetSignature() != nil {
return b.GetSignature() != nil && a.AggregateAttestationAndProof().GetAggregatorIndex() == b.AggregateAttestationAndProof().GetAggregatorIndex()
}
if b.Signature != nil {
if b.GetSignature() != nil {
return false
}
if a.Message.Aggregate.Data.Slot != b.Message.Aggregate.Data.Slot {
aAggregate := a.AggregateAttestationAndProof().AggregateVal()
bAggregate := b.AggregateAttestationAndProof().AggregateVal()
aData := aAggregate.GetData()
bData := bAggregate.GetData()
if aData.Slot != bData.Slot {
return false
}
if a.Message.Aggregate.Data.CommitteeIndex != b.Message.Aggregate.Data.CommitteeIndex {
if a.Version() >= version.Electra {
if !bytes.Equal(aAggregate.CommitteeBitsVal().Bytes(), bAggregate.CommitteeBitsVal().Bytes()) {
return false
}
} else if aData.CommitteeIndex != bData.CommitteeIndex {
return false
}
return bytes.Equal(a.Message.Aggregate.AggregationBits, b.Message.Aggregate.AggregationBits)
return bytes.Equal(aAggregate.GetAggregationBits(), bAggregate.GetAggregationBits())
}
// This validates the pending attestations in the queue are still valid.
@@ -221,7 +235,7 @@ func (s *Service) validatePendingAtts(ctx context.Context, slot primitives.Slot)
for bRoot, atts := range s.blkRootToPendingAtts {
for i := len(atts) - 1; i >= 0; i-- {
if slot >= atts[i].Message.Aggregate.Data.Slot+params.BeaconConfig().SlotsPerEpoch {
if slot >= atts[i].AggregateAttestationAndProof().AggregateVal().GetData().Slot+params.BeaconConfig().SlotsPerEpoch {
// Remove the pending attestation from the list in place.
atts = append(atts[:i], atts[i+1:]...)
}

View File
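
attsAreEqual now compares committee bits for Electra aggregates and falls back to the committee index before that fork. A condensed sketch of the version-gated comparison, using hypothetical local types rather than Prysm's interfaces:

package main

import (
	"bytes"
	"fmt"
)

const versionElectra = 5 // illustrative fork version constant

// aggregate is a hypothetical stand-in for the fields the equality check looks at.
type aggregate struct {
	version         int
	slot            uint64
	committeeIndex  uint64
	committeeBits   []byte // meaningful from Electra on
	aggregationBits []byte
}

// aggregatesAreEqual mirrors the version-gated comparison: committee bits for Electra,
// committee index before it, aggregation bits in both cases.
func aggregatesAreEqual(a, b aggregate) bool {
	if a.slot != b.slot {
		return false
	}
	if a.version >= versionElectra {
		if !bytes.Equal(a.committeeBits, b.committeeBits) {
			return false
		}
	} else if a.committeeIndex != b.committeeIndex {
		return false
	}
	return bytes.Equal(a.aggregationBits, b.aggregationBits)
}

func main() {
	a := aggregate{version: versionElectra, slot: 10, committeeBits: []byte{0b01}, aggregationBits: []byte{0b1}}
	b := aggregate{version: versionElectra, slot: 10, committeeBits: []byte{0b10}, aggregationBits: []byte{0b1}}
	fmt.Println(aggregatesAreEqual(a, b)) // false: committee bits differ
}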

@@ -46,12 +46,12 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: &ethpb.Checkpoint{}}
r := &Service{
cfg: &config{p2p: p1, beaconDB: db, chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot)},
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
chainStarted: abool.New(),
}
a := &ethpb.AggregateAttestationAndProof{Aggregate: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}}
r.blkRootToPendingAtts[[32]byte{'A'}] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a}}
r.blkRootToPendingAtts[[32]byte{'A'}] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: a}}
require.NoError(t, r.processPendingAtts(context.Background()))
require.LogsContain(t, hook, "Requesting block by root")
}
@@ -124,7 +124,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
seenUnAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
@@ -134,7 +134,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root))
r.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.processPendingAtts(context.Background()))
atts, err := r.cfg.attPool.UnaggregatedAttestations()
@@ -162,7 +162,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
}
priv, err := bls.RandKey()
@@ -182,7 +182,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b)
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, r32))
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
require.NoError(t, r.processPendingAtts(context.Background()))
assert.Equal(t, false, p1.BroadcastCalled.Load(), "Broadcasted bad aggregate")
@@ -245,13 +245,13 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
seenUnAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
go r.verifierRoutine()
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.processPendingAtts(context.Background()))
assert.Equal(t, true, p1.BroadcastCalled.Load(), "Could not broadcast the good aggregate")
@@ -330,7 +330,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
seenAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
@@ -339,7 +339,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root))
r.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.processPendingAtts(context.Background()))
assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save aggregated att")
@@ -353,7 +353,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
s := &Service{
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
}
// 100 Attestations per block root.
@@ -401,7 +401,7 @@ func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
func TestValidatePendingAtts_NoDuplicatingAtts(t *testing.T) {
s := &Service{
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
}
r1 := [32]byte{'A'}
@@ -428,7 +428,7 @@ func TestValidatePendingAtts_NoDuplicatingAtts(t *testing.T) {
func TestSavePendingAtts_BeyondLimit(t *testing.T) {
s := &Service{
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
}
for i := 0; i < pendingAttsLimit; i++ {
@@ -457,5 +457,71 @@ func TestSavePendingAtts_BeyondLimit(t *testing.T) {
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r1]), "Saved pending atts")
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r2]), "Saved pending atts")
}
func Test_attsAreEqual_Committee(t *testing.T) {
t.Run("Phase 0 equal", func(t *testing.T) {
att1 := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 123}}}}
att2 := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 123}}}}
assert.Equal(t, true, attsAreEqual(att1, att2))
})
t.Run("Phase 0 not equal", func(t *testing.T) {
att1 := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 123}}}}
att2 := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 456}}}}
assert.Equal(t, false, attsAreEqual(att1, att2))
})
t.Run("Electra equal", func(t *testing.T) {
cb1 := primitives.NewAttestationCommitteeBits()
cb1.SetBitAt(0, true)
att1 := &ethpb.SignedAggregateAttestationAndProofElectra{
Message: &ethpb.AggregateAttestationAndProofElectra{
Aggregate: &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{},
CommitteeBits: cb1,
}}}
cb2 := primitives.NewAttestationCommitteeBits()
cb2.SetBitAt(0, true)
att2 := &ethpb.SignedAggregateAttestationAndProofElectra{
Message: &ethpb.AggregateAttestationAndProofElectra{
Aggregate: &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{},
CommitteeBits: cb2,
}}}
assert.Equal(t, true, attsAreEqual(att1, att2))
})
t.Run("Electra not equal", func(t *testing.T) {
cb1 := primitives.NewAttestationCommitteeBits()
cb1.SetBitAt(0, true)
att1 := &ethpb.SignedAggregateAttestationAndProofElectra{
Message: &ethpb.AggregateAttestationAndProofElectra{
Aggregate: &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{},
CommitteeBits: cb1,
}}}
cb2 := primitives.NewAttestationCommitteeBits()
cb2.SetBitAt(1, true)
att2 := &ethpb.SignedAggregateAttestationAndProofElectra{
Message: &ethpb.AggregateAttestationAndProofElectra{
Aggregate: &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{},
CommitteeBits: cb2,
}}}
assert.Equal(t, false, attsAreEqual(att1, att2))
})
}

View File

@@ -4,14 +4,12 @@ import (
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/network/forks"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -107,7 +105,7 @@ func readFirstChunkedBlock(stream libp2pcore.Stream, tor blockchain.TemporalOrac
if err != nil {
return nil, err
}
blk, err := extractBlockDataType(rpcCtx, tor)
blk, err := extractDataTypeFromTypeMap(types.BlockMap, rpcCtx, tor)
if err != nil {
return nil, err
}
@@ -131,7 +129,7 @@ func readResponseChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle,
if err != nil {
return nil, err
}
blk, err := extractBlockDataType(rpcCtx, tor)
blk, err := extractDataTypeFromTypeMap(types.BlockMap, rpcCtx, tor)
if err != nil {
return nil, err
}
@@ -139,30 +137,6 @@ func readResponseChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle,
return blk, err
}
func extractBlockDataType(digest []byte, tor blockchain.TemporalOracle) (interfaces.ReadOnlySignedBeaconBlock, error) {
if len(digest) == 0 {
bFunc, ok := types.BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if !ok {
return nil, errors.New("no block type exists for the genesis fork version.")
}
return bFunc()
}
if len(digest) != forkDigestLength {
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := tor.GenesisValidatorsRoot()
for k, blkFunc := range types.BlockMap {
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
if err != nil {
return nil, err
}
if rDigest == bytesutil.ToBytes4(digest) {
return blkFunc()
}
}
return nil, errors.Wrapf(ErrNoValidDigest, "could not extract block data type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot())
}
// WriteBlobSidecarChunk writes blob chunk object to stream.
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
func WriteBlobSidecarChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, encoding encoder.NetworkEncoding, sidecar blocks.VerifiedROBlob) error {

View File

@@ -1,121 +0,0 @@
package sync
import (
"reflect"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestExtractBlockDataType(t *testing.T) {
// Precompute digests
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
bellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
type args struct {
digest []byte
chain blockchain.ChainInfoFetcher
}
tests := []struct {
name string
args args
want interfaces.ReadOnlySignedBeaconBlock
wantErr bool
}{
{
name: "no digest",
args: args{
digest: []byte{},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
return wsb
}(),
wantErr: false,
},
{
name: "invalid digest",
args: args{
digest: []byte{0x00, 0x01},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: nil,
wantErr: true,
},
{
name: "non existent digest",
args: args{
digest: []byte{0x00, 0x01, 0x02, 0x03},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: nil,
wantErr: true,
},
{
name: "genesis fork version",
args: args{
digest: genDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
return wsb
}(),
wantErr: false,
},
{
name: "altair fork version",
args: args{
digest: altairDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{Body: &ethpb.BeaconBlockBodyAltair{}}})
require.NoError(t, err)
return wsb
}(),
wantErr: false,
},
{
name: "bellatrix fork version",
args: args{
digest: bellatrixDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{Body: &ethpb.BeaconBlockBodyBellatrix{ExecutionPayload: &enginev1.ExecutionPayload{}}}})
require.NoError(t, err)
return wsb
}(),
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := extractBlockDataType(tt.args.digest, tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("extractBlockDataType() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("extractBlockDataType() got = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -7,13 +7,9 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/network/forks"
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
@@ -112,7 +108,7 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
if err != nil {
return nil, err
}
msg, err := extractMetaDataType(rpcCtx[:], s.cfg.clock)
msg, err := extractDataTypeFromTypeMap(types.MetaDataMap, rpcCtx[:], s.cfg.clock)
if err != nil {
return nil, err
}
@@ -133,27 +129,3 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
}
return msg, nil
}
func extractMetaDataType(digest []byte, tor blockchain.TemporalOracle) (metadata.Metadata, error) {
if len(digest) == 0 {
mdFunc, ok := types.MetaDataMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if !ok {
return nil, errors.New("no metadata type exists for the genesis fork version.")
}
return mdFunc(), nil
}
if len(digest) != forkDigestLength {
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := tor.GenesisValidatorsRoot()
for k, mdFunc := range types.MetaDataMap {
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
if err != nil {
return nil, err
}
if rDigest == bytesutil.ToBytes4(digest) {
return mdFunc(), nil
}
}
return nil, errors.Wrapf(ErrNoValidDigest, "could not extract metadata type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot())
}
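
The call site earlier in this file now routes through a shared extractDataTypeFromTypeMap helper instead of the extractMetaDataType function removed directly above. The generic shape and signature below are a sketch of mine, reconstructed from the removed body; it assumes the sync package's existing imports (pkg/errors, signing, bytesutil, params) plus its forkDigestLength and ErrNoValidDigest values, and the "Sketch" suffix marks names I introduced.

// A minimal sketch, not the actual helper: generalize the deleted lookup over
// any fork-version-keyed constructor map.
func extractDataTypeFromTypeMapSketch[T any](
	typeMap map[[4]byte]func() T,
	digest []byte,
	tor blockchain.TemporalOracle,
) (T, error) {
	var zero T
	if len(digest) == 0 {
		f, ok := typeMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
		if !ok {
			return zero, errors.New("no type exists for the genesis fork version")
		}
		return f(), nil
	}
	if len(digest) != forkDigestLength {
		return zero, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
	}
	vRoot := tor.GenesisValidatorsRoot()
	for version, f := range typeMap {
		rDigest, err := signing.ComputeForkDigest(version[:], vRoot[:])
		if err != nil {
			return zero, err
		}
		if rDigest == bytesutil.ToBytes4(digest) {
			return f(), nil
		}
	}
	return zero, errors.Wrapf(ErrNoValidDigest, "could not extract data type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot())
}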

View File

@@ -2,16 +2,13 @@ package sync
import (
"context"
"reflect"
"sync"
"testing"
"time"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
db "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
@@ -21,7 +18,6 @@ import (
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/equality"
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
@@ -233,80 +229,3 @@ func TestMetadataRPCHandler_SendsMetadataAltair(t *testing.T) {
t.Error("Peer is disconnected despite receiving a valid ping")
}
}
func TestExtractMetaDataType(t *testing.T) {
// Precompute digests
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
type args struct {
digest []byte
clock blockchain.TemporalOracle
}
tests := []struct {
name string
args args
want metadata.Metadata
wantErr bool
}{
{
name: "no digest",
args: args{
digest: []byte{},
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
wantErr: false,
},
{
name: "invalid digest",
args: args{
digest: []byte{0x00, 0x01},
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: nil,
wantErr: true,
},
{
name: "non existent digest",
args: args{
digest: []byte{0x00, 0x01, 0x02, 0x03},
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: nil,
wantErr: true,
},
{
name: "genesis fork version",
args: args{
digest: genDigest[:],
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
wantErr: false,
},
{
name: "altair fork version",
args: args{
digest: altairDigest[:],
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: wrapper.WrappedMetadataV1(&pb.MetaDataV1{}),
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := extractMetaDataType(tt.args.digest, tt.args.clock)
if (err != nil) != tt.wantErr {
t.Errorf("extractMetaDataType() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("extractMetaDataType() got = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -122,7 +122,7 @@ type Service struct {
cancel context.CancelFunc
slotToPendingBlocks *gcache.Cache
seenPendingBlocks map[[32]byte]bool
blkRootToPendingAtts map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof
blkRootToPendingAtts map[[32]byte][]ethpb.SignedAggregateAttAndProof
subHandler *subTopicHandler
pendingAttsLock sync.RWMutex
pendingQueueLock sync.RWMutex
@@ -171,7 +171,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
slotToPendingBlocks: c,
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
for _, opt := range opts {
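
Changing blkRootToPendingAtts to hold the ethpb.SignedAggregateAttAndProof interface rather than the concrete Phase0 pointer type lets a single queue carry both Phase0 and Electra aggregates. A hypothetical illustration using the two concrete wrappers that appear later in this diff; the function and variable names here are invented.

// Sketch only: both concrete signed-aggregate types satisfy the
// ethpb.SignedAggregateAttAndProof interface, so the pending queue can mix them.
func queuePendingAggregatesSketch() {
	pending := make(map[[32]byte][]ethpb.SignedAggregateAttAndProof)
	var root [32]byte // placeholder block root the pending aggregates vote for
	pending[root] = append(pending[root],
		&ethpb.SignedAggregateAttestationAndProof{
			Message: &ethpb.AggregateAttestationAndProof{Aggregate: &ethpb.Attestation{}},
		},
		&ethpb.SignedAggregateAttestationAndProofElectra{
			Message: &ethpb.AggregateAttestationAndProofElectra{Aggregate: &ethpb.AttestationElectra{}},
		},
	)
	_ = pending
}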

View File

@@ -13,19 +13,21 @@ import (
// beaconAggregateProofSubscriber forwards the incoming validated aggregated attestation and proof to the
// attestation pool for processing.
func (s *Service) beaconAggregateProofSubscriber(_ context.Context, msg proto.Message) error {
a, ok := msg.(*ethpb.SignedAggregateAttestationAndProof)
a, ok := msg.(ethpb.SignedAggregateAttAndProof)
if !ok {
return fmt.Errorf("message was not type *ethpb.SignedAggregateAttestationAndProof, type=%T", msg)
return fmt.Errorf("message was not type ethpb.SignedAggregateAttAndProof, type=%T", msg)
}
if a.Message.Aggregate == nil || a.Message.Aggregate.Data == nil {
aggregate := a.AggregateAttestationAndProof().AggregateVal()
if aggregate == nil || aggregate.GetData() == nil {
return errors.New("nil aggregate")
}
// An unaggregated attestation can make it here. It's valid, the aggregator is just itself, although it means poor performance for the subnet.
if !helpers.IsAggregated(a.Message.Aggregate) {
return s.cfg.attPool.SaveUnaggregatedAttestation(a.Message.Aggregate)
if !helpers.IsAggregated(aggregate) {
return s.cfg.attPool.SaveUnaggregatedAttestation(aggregate)
}
return s.cfg.attPool.SaveAggregatedAttestation(a.Message.Aggregate)
return s.cfg.attPool.SaveAggregatedAttestation(aggregate)
}
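
Whether the aggregate lands in the aggregated or unaggregated pool hinges on helpers.IsAggregated. A minimal sketch of the check it is assumed to perform; the real helper may differ.

// Sketch only: treat an attestation as aggregated when more than one
// aggregation bit is set, which is what the branch above relies on.
func isAggregatedSketch(att ethpb.Att) bool {
	return att.GetAggregationBits().Count() > 1
}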

View File

@@ -15,19 +15,21 @@ import (
)
func (s *Service) committeeIndexBeaconAttestationSubscriber(_ context.Context, msg proto.Message) error {
a, ok := msg.(*eth.Attestation)
a, ok := msg.(eth.Att)
if !ok {
return fmt.Errorf("message was not type *eth.Attestation, type=%T", msg)
return fmt.Errorf("message was not type eth.Att, type=%T", msg)
}
if a.Data == nil {
data := a.GetData()
if data == nil {
return errors.New("nil attestation")
}
s.setSeenCommitteeIndicesSlot(a.Data.Slot, a.Data.CommitteeIndex, a.AggregationBits)
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, a.GetAggregationBits())
exists, err := s.cfg.attPool.HasAggregatedAttestation(a)
if err != nil {
return errors.Wrap(err, "Could not determine if attestation pool has this atttestation")
return errors.Wrap(err, "could not determine if attestation pool has this attestation")
}
if exists {
return nil
@@ -36,11 +38,11 @@ func (s *Service) committeeIndexBeaconAttestationSubscriber(_ context.Context, m
return s.cfg.attPool.SaveUnaggregatedAttestation(a)
}
func (_ *Service) persistentSubnetIndices() []uint64 {
func (*Service) persistentSubnetIndices() []uint64 {
return cache.SubnetIDs.GetAllSubnets()
}
func (_ *Service) aggregatorSubnetIndices(currentSlot primitives.Slot) []uint64 {
func (*Service) aggregatorSubnetIndices(currentSlot primitives.Slot) []uint64 {
endEpoch := slots.ToEpoch(currentSlot) + 1
endSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(endEpoch))
var commIds []uint64
@@ -50,7 +52,7 @@ func (_ *Service) aggregatorSubnetIndices(currentSlot primitives.Slot) []uint64
return slice.SetUint64(commIds)
}
func (_ *Service) attesterSubnetIndices(currentSlot primitives.Slot) []uint64 {
func (*Service) attesterSubnetIndices(currentSlot primitives.Slot) []uint64 {
endEpoch := slots.ToEpoch(currentSlot) + 1
endSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(endEpoch))
var commIds []uint64

View File

@@ -20,6 +20,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
@@ -47,38 +48,48 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
tracing.AnnotateError(span, err)
return pubsub.ValidationReject, err
}
m, ok := raw.(*ethpb.SignedAggregateAttestationAndProof)
m, ok := raw.(ethpb.SignedAggregateAttAndProof)
if !ok {
return pubsub.ValidationReject, errors.Errorf("invalid message type: %T", raw)
}
if m.Message == nil {
if m.AggregateAttestationAndProof() == nil {
return pubsub.ValidationReject, errNilMessage
}
if err := helpers.ValidateNilAttestation(m.Message.Aggregate); err != nil {
aggregate := m.AggregateAttestationAndProof().AggregateVal()
data := aggregate.GetData()
if err := helpers.ValidateNilAttestation(aggregate); err != nil {
return pubsub.ValidationReject, err
}
// Do not process slot 0 aggregates.
if m.Message.Aggregate.Data.Slot == 0 {
if data.Slot == 0 {
return pubsub.ValidationIgnore, nil
}
// Broadcast the aggregated attestation on a feed to notify other services in the beacon node
// of a received aggregated attestation.
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.AggregatedAttReceived,
Data: &operation.AggregatedAttReceivedData{
Attestation: m.Message,
},
})
// TODO: this will be extended to Electra in a later PR
if m.Version() == version.Phase0 {
phase0Att, ok := m.(*ethpb.SignedAggregateAttestationAndProof)
if ok {
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.AggregatedAttReceived,
Data: &operation.AggregatedAttReceivedData{
Attestation: phase0Att.Message,
},
})
}
}
if err := helpers.ValidateSlotTargetEpoch(m.Message.Aggregate.Data); err != nil {
if err := helpers.ValidateSlotTargetEpoch(data); err != nil {
return pubsub.ValidationReject, err
}
// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
// processing tolerance.
if err := helpers.ValidateAttestationTime(
m.Message.Aggregate.Data.Slot,
data.Slot,
s.cfg.clock.GenesisTime(),
earlyAttestationProcessingTolerance,
); err != nil {
@@ -87,19 +98,19 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
}
// Verify this is the first aggregate received from the aggregator with index and slot.
if s.hasSeenAggregatorIndexEpoch(m.Message.Aggregate.Data.Target.Epoch, m.Message.AggregatorIndex) {
if s.hasSeenAggregatorIndexEpoch(data.Target.Epoch, m.AggregateAttestationAndProof().GetAggregatorIndex()) {
return pubsub.ValidationIgnore, nil
}
// Check that the block being voted on isn't invalid.
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Target.Root)) ||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Source.Root)) {
if s.hasBadBlock(bytesutil.ToBytes32(data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(data.Target.Root)) ||
s.hasBadBlock(bytesutil.ToBytes32(data.Source.Root)) {
attBadBlockCount.Inc()
return pubsub.ValidationReject, errors.New("bad block referenced in attestation data")
}
// Verify aggregate attestation has not already been seen via aggregate gossip, within a block, or through the creation locally.
seen, err := s.cfg.attPool.HasAggregatedAttestation(m.Message.Aggregate)
seen, err := s.cfg.attPool.HasAggregatedAttestation(aggregate)
if err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
@@ -116,7 +127,7 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
return validationRes, err
}
s.setAggregatorIndexEpochSeen(m.Message.Aggregate.Data.Target.Epoch, m.Message.AggregatorIndex)
s.setAggregatorIndexEpochSeen(data.Target.Epoch, m.AggregateAttestationAndProof().GetAggregatorIndex())
msg.ValidatorData = m
@@ -125,44 +136,75 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
return pubsub.ValidationAccept, nil
}
func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.SignedAggregateAttestationAndProof) (pubsub.ValidationResult, error) {
func (s *Service) validateAggregatedAtt(ctx context.Context, signed ethpb.SignedAggregateAttAndProof) (pubsub.ValidationResult, error) {
ctx, span := trace.StartSpan(ctx, "sync.validateAggregatedAtt")
defer span.End()
aggregateAndProof := signed.AggregateAttestationAndProof()
aggregatorIndex := aggregateAndProof.GetAggregatorIndex()
aggregate := aggregateAndProof.AggregateVal()
data := aggregate.GetData()
// Verify attestation target root is consistent with the head root.
// This verification is not in the spec, however we guard against it as it opens us up
// to weird edge cases during verification. The attestation technically could be used to add value to a block,
// but it's invalid in the spirit of the protocol. Here we choose safety over profit.
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, signed.Message.Aggregate); err != nil {
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, aggregate); err != nil {
tracing.AnnotateError(span, err)
attBadLmdConsistencyCount.Inc()
return pubsub.ValidationReject, err
}
// Verify current finalized checkpoint is an ancestor of the block defined by the attestation's beacon block root.
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(signed.Message.Aggregate.Data.BeaconBlockRoot)) {
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
tracing.AnnotateError(span, blockchain.ErrNotDescendantOfFinalized)
return pubsub.ValidationIgnore, blockchain.ErrNotDescendantOfFinalized
}
bs, err := s.cfg.chain.AttestationTargetState(ctx, signed.Message.Aggregate.Data.Target)
bs, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
if err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
// Verify validator index is within the beacon committee.
result, err := s.validateIndexInCommittee(ctx, bs, signed.Message.Aggregate, signed.Message.AggregatorIndex)
result, err := s.validateIndexInCommittee(ctx, bs, aggregate, aggregatorIndex)
if result != pubsub.ValidationAccept {
wrappedErr := errors.Wrapf(err, "Could not validate index in committee")
wrappedErr := errors.Wrapf(err, "could not validate index in committee")
tracing.AnnotateError(span, wrappedErr)
return result, wrappedErr
}
var committeeIndex primitives.CommitteeIndex
if signed.Version() >= version.Electra {
a, ok := aggregate.(*ethpb.AttestationElectra)
// This will never fail in practice because we asserted the version
if !ok {
err := fmt.Errorf("aggregate attestation has wrong type (expected %T, got %T)", &ethpb.AttestationElectra{}, aggregate)
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
committeeIndex, result, err = validateCommitteeIndexElectra(ctx, a)
if result != pubsub.ValidationAccept {
wrappedErr := errors.Wrapf(err, "could not validate committee index for Electra version")
tracing.AnnotateError(span, wrappedErr)
return result, wrappedErr
}
} else {
committeeIndex = data.CommitteeIndex
}
// Verify the selection proof corresponds to the right validator.
selectionSigSet, err := validateSelectionIndex(ctx, bs, signed.Message.Aggregate.Data, signed.Message.AggregatorIndex, signed.Message.SelectionProof)
selectionSigSet, err := validateSelectionIndex(
ctx,
bs,
data.Slot,
committeeIndex,
aggregatorIndex,
aggregateAndProof.GetSelectionProof(),
)
if err != nil {
wrappedErr := errors.Wrapf(err, "Could not validate selection for validator %d", signed.Message.AggregatorIndex)
wrappedErr := errors.Wrapf(err, "could not validate selection for validator %d", aggregateAndProof.GetAggregatorIndex())
tracing.AnnotateError(span, wrappedErr)
attBadSelectionProofCount.Inc()
return pubsub.ValidationReject, wrappedErr
@@ -172,13 +214,13 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.Signe
// We use batch verify here to save compute.
aggregatorSigSet, err := aggSigSet(bs, signed)
if err != nil {
wrappedErr := errors.Wrapf(err, "Could not get aggregator sig set %d", signed.Message.AggregatorIndex)
wrappedErr := errors.Wrapf(err, "could not get aggregator sig set %d", aggregatorIndex)
tracing.AnnotateError(span, wrappedErr)
return pubsub.ValidationIgnore, wrappedErr
}
attSigSet, err := blocks.AttestationSignatureBatch(ctx, bs, []ethpb.Att{signed.Message.Aggregate})
attSigSet, err := blocks.AttestationSignatureBatch(ctx, bs, []ethpb.Att{aggregate})
if err != nil {
wrappedErr := errors.Wrapf(err, "Could not verify aggregator signature %d", signed.Message.AggregatorIndex)
wrappedErr := errors.Wrapf(err, "could not verify aggregator signature %d", aggregatorIndex)
tracing.AnnotateError(span, wrappedErr)
return pubsub.ValidationIgnore, wrappedErr
}
@@ -188,10 +230,9 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.Signe
return s.validateWithBatchVerifier(ctx, "aggregate", set)
}
func (s *Service) validateBlockInAttestation(ctx context.Context, satt *ethpb.SignedAggregateAttestationAndProof) bool {
a := satt.Message
func (s *Service) validateBlockInAttestation(ctx context.Context, satt ethpb.SignedAggregateAttAndProof) bool {
// Verify that the block being voted on and the processed state are in the beaconDB. The block should have passed validation if it's in the beaconDB.
blockRoot := bytesutil.ToBytes32(a.Aggregate.Data.BeaconBlockRoot)
blockRoot := bytesutil.ToBytes32(satt.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)
if !s.hasBlockAndState(ctx, blockRoot) {
// A node doesn't have the block, it'll request from peer while saving the pending attestation to a queue.
s.savePendingAtt(satt)
@@ -234,7 +275,7 @@ func (s *Service) validateIndexInCommittee(ctx context.Context, bs state.ReadOnl
return result, err
}
committee, result, err := s.validateBitLength(ctx, a, bs)
committee, result, err := s.validateBitLength(ctx, bs, a.GetData().Slot, a.GetData().CommitteeIndex, a.GetAggregationBits())
if result != pubsub.ValidationAccept {
return result, err
}
@@ -262,14 +303,15 @@ func (s *Service) validateIndexInCommittee(ctx context.Context, bs state.ReadOnl
func validateSelectionIndex(
ctx context.Context,
bs state.ReadOnlyBeaconState,
data *ethpb.AttestationData,
slot primitives.Slot,
committeeIndex primitives.CommitteeIndex,
validatorIndex primitives.ValidatorIndex,
proof []byte,
) (*bls.SignatureBatch, error) {
ctx, span := trace.StartSpan(ctx, "sync.validateSelectionIndex")
defer span.End()
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, data.Slot, data.CommitteeIndex)
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, slot, committeeIndex)
if err != nil {
return nil, err
}
@@ -278,11 +320,11 @@ func validateSelectionIndex(
return nil, err
}
if !aggregator {
return nil, fmt.Errorf("validator is not an aggregator for slot %d", data.Slot)
return nil, fmt.Errorf("validator is not an aggregator for slot %d", slot)
}
domain := params.BeaconConfig().DomainSelectionProof
epoch := slots.ToEpoch(data.Slot)
epoch := slots.ToEpoch(slot)
v, err := bs.ValidatorAtIndex(validatorIndex)
if err != nil {
@@ -297,7 +339,7 @@ func validateSelectionIndex(
if err != nil {
return nil, err
}
sszUint := primitives.SSZUint64(data.Slot)
sszUint := primitives.SSZUint64(slot)
root, err := signing.ComputeSigningRoot(&sszUint, d)
if err != nil {
return nil, err
@@ -311,8 +353,10 @@ func validateSelectionIndex(
}
// This returns aggregator signature set which can be used to batch verify.
func aggSigSet(s state.ReadOnlyBeaconState, a *ethpb.SignedAggregateAttestationAndProof) (*bls.SignatureBatch, error) {
v, err := s.ValidatorAtIndex(a.Message.AggregatorIndex)
func aggSigSet(s state.ReadOnlyBeaconState, a ethpb.SignedAggregateAttAndProof) (*bls.SignatureBatch, error) {
aggregateAndProof := a.AggregateAttestationAndProof()
v, err := s.ValidatorAtIndex(aggregateAndProof.GetAggregatorIndex())
if err != nil {
return nil, err
}
@@ -321,17 +365,17 @@ func aggSigSet(s state.ReadOnlyBeaconState, a *ethpb.SignedAggregateAttestationA
return nil, err
}
epoch := slots.ToEpoch(a.Message.Aggregate.Data.Slot)
epoch := slots.ToEpoch(aggregateAndProof.AggregateVal().GetData().Slot)
d, err := signing.Domain(s.Fork(), epoch, params.BeaconConfig().DomainAggregateAndProof, s.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
root, err := signing.ComputeSigningRoot(a.Message, d)
root, err := signing.ComputeSigningRoot(aggregateAndProof, d)
if err != nil {
return nil, err
}
return &bls.SignatureBatch{
Signatures: [][]byte{a.Signature},
Signatures: [][]byte{a.GetSignature()},
PublicKeys: []bls.PublicKey{publicKey},
Messages: [][32]byte{root},
Descriptions: []string{signing.AggregatorSignature},

View File

@@ -117,7 +117,7 @@ func TestVerifySelection_NotAnAggregator(t *testing.T) {
sig := privKeys[0].Sign([]byte{'A'})
data := util.HydrateAttestationData(&ethpb.AttestationData{})
_, err := validateSelectionIndex(ctx, beaconState, data, 0, sig.Marshal())
_, err := validateSelectionIndex(ctx, beaconState, data.Slot, data.CommitteeIndex, 0, sig.Marshal())
wanted := "validator is not an aggregator for slot"
assert.ErrorContains(t, wanted, err)
}
@@ -149,7 +149,7 @@ func TestValidateAggregateAndProof_NoBlock(t *testing.T) {
attPool: attestations.NewPool(),
chain: &mock.ChainService{},
},
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
seenAggregatedAttestationCache: c,
}
r.initCaches()
@@ -302,7 +302,7 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) {
attestationNotifier: (&mock.ChainService{}).OperationNotifier(),
},
seenAggregatedAttestationCache: lruwrpr.New(10),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
}
r.initCaches()

View File

@@ -9,6 +9,7 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
@@ -22,6 +23,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
)
@@ -55,16 +57,18 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
return pubsub.ValidationReject, err
}
att, ok := m.(*eth.Attestation)
att, ok := m.(eth.Att)
if !ok {
return pubsub.ValidationReject, errWrongMessage
}
data := att.GetData()
if err := helpers.ValidateNilAttestation(att); err != nil {
return pubsub.ValidationReject, err
}
// Do not process slot 0 attestations.
if att.Data.Slot == 0 {
if data.Slot == 0 {
return pubsub.ValidationIgnore, nil
}
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
@@ -78,15 +82,36 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
// processing tolerance.
if err := helpers.ValidateAttestationTime(att.Data.Slot, s.cfg.clock.GenesisTime(),
if err := helpers.ValidateAttestationTime(data.Slot, s.cfg.clock.GenesisTime(),
earlyAttestationProcessingTolerance); err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
if err := helpers.ValidateSlotTargetEpoch(att.Data); err != nil {
if err := helpers.ValidateSlotTargetEpoch(data); err != nil {
return pubsub.ValidationReject, err
}
var validationRes pubsub.ValidationResult
var committeeIndex primitives.CommitteeIndex
if att.Version() >= version.Electra {
a, ok := att.(*eth.AttestationElectra)
// This will never fail in practice because we asserted the version
if !ok {
err := fmt.Errorf("attestation has wrong type (expected %T, got %T)", &eth.AttestationElectra{}, att)
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
committeeIndex, validationRes, err = validateCommitteeIndexElectra(ctx, a)
if validationRes != pubsub.ValidationAccept {
wrappedErr := errors.Wrapf(err, "could not validate committee index for Electra version")
tracing.AnnotateError(span, wrappedErr)
return validationRes, wrappedErr
}
} else {
committeeIndex = data.CommitteeIndex
}
if features.Get().EnableSlasher {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
@@ -94,13 +119,13 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
// Using a different context to prevent timeouts as this operation can be expensive
// and we want to avoid affecting the critical code path.
ctx := context.TODO()
preState, err := s.cfg.chain.AttestationTargetState(ctx, att.Data.Target)
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
if err != nil {
log.WithError(err).Error("Could not retrieve pre state")
tracing.AnnotateError(span, err)
return
}
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, data.Slot, committeeIndex)
if err != nil {
log.WithError(err).Error("Could not get attestation committee")
tracing.AnnotateError(span, err)
@@ -117,27 +142,41 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
}
// Verify this is the first attestation received for the participating validator for the slot.
if s.hasSeenCommitteeIndicesSlot(att.Data.Slot, att.Data.CommitteeIndex, att.AggregationBits) {
if s.hasSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, att.GetAggregationBits()) {
return pubsub.ValidationIgnore, nil
}
// Reject an attestation if it references an invalid block.
if s.hasBadBlock(bytesutil.ToBytes32(att.Data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(att.Data.Target.Root)) ||
s.hasBadBlock(bytesutil.ToBytes32(att.Data.Source.Root)) {
if s.hasBadBlock(bytesutil.ToBytes32(data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(data.Target.Root)) ||
s.hasBadBlock(bytesutil.ToBytes32(data.Source.Root)) {
attBadBlockCount.Inc()
return pubsub.ValidationReject, errors.New("attestation data references bad block root")
}
// Verify that the block being voted on and the processed state are in the beaconDB, and that the block has passed validation if it's in the beaconDB.
blockRoot := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
blockRoot := bytesutil.ToBytes32(data.BeaconBlockRoot)
if !s.hasBlockAndState(ctx, blockRoot) {
// A node doesn't have the block, it'll request from peer while saving the pending attestation to a queue.
s.savePendingAtt(&eth.SignedAggregateAttestationAndProof{Message: &eth.AggregateAttestationAndProof{Aggregate: att}})
if att.Version() >= version.Electra {
a, ok := att.(*eth.AttestationElectra)
// This will never fail in practice because we asserted the version
if !ok {
return pubsub.ValidationIgnore, fmt.Errorf("attestation has wrong type (expected %T, got %T)", &eth.AttestationElectra{}, att)
}
s.savePendingAtt(&eth.SignedAggregateAttestationAndProofElectra{Message: &eth.AggregateAttestationAndProofElectra{Aggregate: a}})
} else {
a, ok := att.(*eth.Attestation)
// This will never fail in practice because we asserted the version
if !ok {
return pubsub.ValidationIgnore, fmt.Errorf("attestation has wrong type (expected %T, got %T)", &eth.Attestation{}, att)
}
s.savePendingAtt(&eth.SignedAggregateAttestationAndProof{Message: &eth.AggregateAttestationAndProof{Aggregate: a}})
}
return pubsub.ValidationIgnore, nil
}
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(att.Data.BeaconBlockRoot)) {
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
tracing.AnnotateError(span, blockchain.ErrNotDescendantOfFinalized)
return pubsub.ValidationIgnore, blockchain.ErrNotDescendantOfFinalized
}
@@ -147,13 +186,13 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
return pubsub.ValidationReject, err
}
preState, err := s.cfg.chain.AttestationTargetState(ctx, att.Data.Target)
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
if err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
validationRes, err := s.validateUnaggregatedAttTopic(ctx, att, preState, *msg.Topic)
validationRes, err = s.validateUnaggregatedAttTopic(ctx, att, preState, *msg.Topic)
if validationRes != pubsub.ValidationAccept {
return validationRes, err
}
@@ -163,7 +202,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
return validationRes, err
}
s.setSeenCommitteeIndicesSlot(att.Data.Slot, att.Data.CommitteeIndex, att.AggregationBits)
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, att.GetAggregationBits())
msg.ValidatorData = att
@@ -211,7 +250,7 @@ func (s *Service) validateUnaggregatedAttWithState(ctx context.Context, a eth.At
ctx, span := trace.StartSpan(ctx, "sync.validateUnaggregatedAttWithState")
defer span.End()
committee, result, err := s.validateBitLength(ctx, a, bs)
committee, result, err := s.validateBitLength(ctx, bs, a.GetData().Slot, a.GetData().CommitteeIndex, a.GetAggregationBits())
if result != pubsub.ValidationAccept {
return result, err
}
@@ -232,14 +271,20 @@ func (s *Service) validateUnaggregatedAttWithState(ctx context.Context, a eth.At
return s.validateWithBatchVerifier(ctx, "attestation", set)
}
func (s *Service) validateBitLength(ctx context.Context, a eth.Att, bs state.ReadOnlyBeaconState) ([]primitives.ValidatorIndex, pubsub.ValidationResult, error) {
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, a.GetData().Slot, a.GetData().CommitteeIndex)
func (s *Service) validateBitLength(
ctx context.Context,
bs state.ReadOnlyBeaconState,
slot primitives.Slot,
committeeIndex primitives.CommitteeIndex,
aggregationBits bitfield.Bitlist,
) ([]primitives.ValidatorIndex, pubsub.ValidationResult, error) {
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, slot, committeeIndex)
if err != nil {
return nil, pubsub.ValidationIgnore, err
}
// Verify number of aggregation bits matches the committee size.
if err := helpers.VerifyBitfieldLength(a.GetAggregationBits(), uint64(len(committee))); err != nil {
if err := helpers.VerifyBitfieldLength(aggregationBits, uint64(len(committee))); err != nil {
return nil, pubsub.ValidationReject, err
}

View File

@@ -0,0 +1,27 @@
package sync
import (
"context"
"fmt"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
func validateCommitteeIndexElectra(ctx context.Context, a *ethpb.AttestationElectra) (primitives.CommitteeIndex, pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateCommitteeIndexElectra")
defer span.End()
ci := a.Data.CommitteeIndex
if ci != 0 {
return 0, pubsub.ValidationReject, fmt.Errorf("committee index must be 0 but was %d", ci)
}
committeeIndices := helpers.CommitteeIndices(a.CommitteeBits)
if len(committeeIndices) != 1 {
return 0, pubsub.ValidationReject, fmt.Errorf("exactly 1 committee index must be set but %d were set", len(committeeIndices))
}
return committeeIndices[0], pubsub.ValidationAccept, nil
}
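
The helper leans on helpers.CommitteeIndices to turn the EIP-7549 committee_bits vector into explicit committee indices. A rough sketch of that conversion, assuming the bits behave like a go-bitfield Bitvector64 (Len/BitAt/Count); the real helper may be implemented differently, and the "Sketch" name is mine.

// Sketch only: collect the positions of the set bits in committee_bits.
func committeeIndicesSketch(bits bitfield.Bitvector64) []primitives.CommitteeIndex {
	indices := make([]primitives.CommitteeIndex, 0, bits.Count())
	for i := uint64(0); i < bits.Len(); i++ {
		if bits.BitAt(i) {
			indices = append(indices, primitives.CommitteeIndex(i))
		}
	}
	return indices
}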

View File

@@ -0,0 +1,46 @@
package sync
import (
"context"
"testing"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func Test_validateCommitteeIndexElectra(t *testing.T) {
ctx := context.Background()
t.Run("valid", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(1, true)
ci, res, err := validateCommitteeIndexElectra(ctx, &ethpb.AttestationElectra{Data: &ethpb.AttestationData{}, CommitteeBits: cb})
require.NoError(t, err)
assert.Equal(t, pubsub.ValidationAccept, res)
assert.Equal(t, primitives.CommitteeIndex(1), ci)
})
t.Run("non-zero data committee index", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(1, true)
_, res, err := validateCommitteeIndexElectra(ctx, &ethpb.AttestationElectra{Data: &ethpb.AttestationData{CommitteeIndex: 1}, CommitteeBits: cb})
assert.NotNil(t, err)
assert.Equal(t, pubsub.ValidationReject, res)
})
t.Run("no committee bits set", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
_, res, err := validateCommitteeIndexElectra(ctx, &ethpb.AttestationElectra{Data: &ethpb.AttestationData{}, CommitteeBits: cb})
assert.NotNil(t, err)
assert.Equal(t, pubsub.ValidationReject, res)
})
t.Run("more than 1 committee bit set", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
cb.SetBitAt(1, true)
_, res, err := validateCommitteeIndexElectra(ctx, &ethpb.AttestationElectra{Data: &ethpb.AttestationData{}, CommitteeBits: cb})
assert.NotNil(t, err)
assert.Equal(t, pubsub.ValidationReject, res)
})
}

View File

@@ -49,7 +49,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attestationNotifier: (&mockChain.ChainService{}).OperationNotifier(),
},
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
seenUnAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
@@ -290,7 +290,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
m.Message.Topic = nil
}
res, err := s.validateCommitteeIndexBeaconAttestation(ctx, "" /*peerID*/, m)
res, err := s.validateCommitteeIndexBeaconAttestation(ctx, "", m)
received := res == pubsub.ValidationAccept
if received != tt.want {
t.Fatalf("Did not received wanted validation. Got %v, wanted %v", !tt.want, tt.want)

View File

@@ -1,6 +1,7 @@
package loader
import (
"encoding/json"
"fmt"
"strconv"
@@ -128,12 +129,23 @@ func (psl *settingsLoader) Load(cliCtx *cli.Context) (*proposer.Settings, error)
return nil, err
}
loadConfig = dbps.ToConsensus()
log.Debugf("DB loaded proposer settings: %s", func() string {
b, err := json.Marshal(loadConfig)
if err != nil {
return err.Error()
}
return string(b)
}())
}
// start to process based on load method
for _, method := range psl.loadMethods {
switch method {
case defaultFlag:
if psl.existsInDB && len(psl.loadMethods) == 1 {
// only log the below if default flag is the only load method
log.Warn("Previously saved proposer settings were loaded from the DB, only default settings will be updated. Please provide new proposer settings or clear DB to reset proposer settings.")
}
suggestedFeeRecipient := cliCtx.String(flags.SuggestedFeeRecipientFlag.Name)
if !common.IsHexAddress(suggestedFeeRecipient) {
return nil, errors.Errorf("--%s is not a valid Ethereum address", flags.SuggestedFeeRecipientFlag.Name)
@@ -157,6 +169,7 @@ func (psl *settingsLoader) Load(cliCtx *cli.Context) (*proposer.Settings, error)
return nil, errors.Errorf("proposer settings is empty after unmarshalling from file specified by %s flag", flags.ProposerSettingsFlag.Name)
}
loadConfig = psl.processProposerSettings(settingFromFile, loadConfig)
log.WithField(flags.ProposerSettingsFlag.Name, cliCtx.String(flags.ProposerSettingsFlag.Name)).Info("Proposer settings loaded from file")
case urlFlag:
var settingFromURL *validatorpb.ProposerSettingsPayload
if err := config.UnmarshalFromURL(cliCtx.Context, cliCtx.String(flags.ProposerSettingsURLFlag.Name), &settingFromURL); err != nil {
@@ -166,9 +179,14 @@ func (psl *settingsLoader) Load(cliCtx *cli.Context) (*proposer.Settings, error)
return nil, errors.New("proposer settings is empty after unmarshalling from url")
}
loadConfig = psl.processProposerSettings(settingFromURL, loadConfig)
log.WithField(flags.ProposerSettingsURLFlag.Name, cliCtx.String(flags.ProposerSettingsURLFlag.Name)).Infof("Proposer settings loaded from URL")
case onlyDB:
loadConfig = psl.processProposerSettings(nil, loadConfig)
log.Info("Proposer settings loaded from the DB")
case none:
if psl.existsInDB {
log.Info("Proposer settings loaded from the DB")
}
if psl.options.builderConfig != nil {
// If no proposer settings are provided, create a default where the fee recipient is not populated; validator registration will be skipped for validators that don't have a fee recipient set.
// skip saving to DB if only builder settings are provided until a trigger like keymanager API updates with fee recipient values

View File

@@ -2973,14 +2973,14 @@ def prysm_deps():
go_repository(
name = "com_github_prysmaticlabs_fastssz",
importpath = "github.com/prysmaticlabs/fastssz",
sum = "h1:c3p3UzV4vFA7xaCDphnDWOjpxcadrQ26l5b+ypsvyxo=",
version = "v0.0.0-20221107182844-78142813af44",
sum = "h1:0LZAwwHnsZFfXm4IK4rzFV4N5IVSKZKLmuBMA4kAlFk=",
version = "v0.0.0-20240620202422-a981b8ef89d3",
)
go_repository(
name = "com_github_prysmaticlabs_go_bitfield",
importpath = "github.com/prysmaticlabs/go-bitfield",
sum = "h1:0tVE4tdWQK9ZpYygoV7+vS6QkDvQVySboMVEIxBJmXw=",
version = "v0.0.0-20210809151128-385d8c5e3fb7",
sum = "h1:ATgOe+abbzfx9kCPeXIW4fiWyDdxlwHw07j8UGhdTd4=",
version = "v0.0.0-20240328144219-a1caa50c3a1e",
)
go_repository(
name = "com_github_prysmaticlabs_gohashtree",

View File

@@ -7,6 +7,7 @@ go_library(
"helpers.go",
"htrutils.go",
"merkleize.go",
"slice_root.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/encoding/ssz",
visibility = ["//visibility:public"],

View File

@@ -144,51 +144,13 @@ func WithdrawalSliceRoot(withdrawals []*enginev1.Withdrawal, limit uint64) ([32]
// DepositRequestsSliceRoot computes the HTR of a slice of deposit requests.
// The limit parameter is used as input to the bitwise merkleization algorithm.
func DepositRequestsSliceRoot(depositRequests []*enginev1.DepositRequest, limit uint64) ([32]byte, error) {
roots := make([][32]byte, len(depositRequests))
for i := 0; i < len(depositRequests); i++ {
r, err := depositRequests[i].HashTreeRoot()
if err != nil {
return [32]byte{}, err
}
roots[i] = r
}
bytesRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), limit)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute merkleization")
}
bytesRootBuf := new(bytes.Buffer)
if err := binary.Write(bytesRootBuf, binary.LittleEndian, uint64(len(depositRequests))); err != nil {
return [32]byte{}, errors.Wrap(err, "could not marshal length")
}
bytesRootBufRoot := make([]byte, 32)
copy(bytesRootBufRoot, bytesRootBuf.Bytes())
return MixInLength(bytesRoot, bytesRootBufRoot), nil
return SliceRoot(depositRequests, limit)
}
// WithdrawalRequestsSliceRoot computes the HTR of a slice of withdrawal requests from the EL.
// The limit parameter is used as input to the bitwise merkleization algorithm.
func WithdrawalRequestsSliceRoot(withdrawalRequests []*enginev1.WithdrawalRequest, limit uint64) ([32]byte, error) {
roots := make([][32]byte, len(withdrawalRequests))
for i := 0; i < len(withdrawalRequests); i++ {
r, err := withdrawalRequests[i].HashTreeRoot()
if err != nil {
return [32]byte{}, err
}
roots[i] = r
}
bytesRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), limit)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute merkleization")
}
bytesRootBuf := new(bytes.Buffer)
if err := binary.Write(bytesRootBuf, binary.LittleEndian, uint64(len(withdrawalRequests))); err != nil {
return [32]byte{}, errors.Wrap(err, "could not marshal length")
}
bytesRootBufRoot := make([]byte, 32)
copy(bytesRootBufRoot, bytesRootBuf.Bytes())
return MixInLength(bytesRoot, bytesRootBufRoot), nil
return SliceRoot(withdrawalRequests, limit)
}
// ByteSliceRoot is a helper func to merkleize an arbitrary List[Byte, N]

View File

@@ -1,4 +1,4 @@
package stateutil
package ssz
import (
"bytes"
@@ -6,11 +6,10 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
)
// SliceRoot computes the root of a slice of hashable objects.
func SliceRoot[T ssz.Hashable](slice []T, limit uint64) ([32]byte, error) {
func SliceRoot[T Hashable](slice []T, limit uint64) ([32]byte, error) {
max := limit
if uint64(len(slice)) > max {
return [32]byte{}, fmt.Errorf("slice exceeds max length %d", max)
@@ -25,7 +24,7 @@ func SliceRoot[T ssz.Hashable](slice []T, limit uint64) ([32]byte, error) {
roots[i] = r
}
sliceRoot, err := ssz.BitwiseMerkleize(roots, uint64(len(roots)), limit)
sliceRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), limit)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not slice merkleization")
}
@@ -36,6 +35,5 @@ func SliceRoot[T ssz.Hashable](slice []T, limit uint64) ([32]byte, error) {
// We need to mix in the length of the slice.
sliceLenRoot := make([]byte, 32)
copy(sliceLenRoot, sliceLenBuf.Bytes())
res := ssz.MixInLength(sliceRoot, sliceLenRoot)
return res, nil
return MixInLength(sliceRoot, sliceLenRoot), nil
}
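
SliceRoot constrains its elements with the package's Hashable interface, whose definition is not shown in this diff. The sketch below assumes it boils down to an SSZ HashTreeRoot method, consistent with how the deleted per-type helpers computed element roots; hashableSketch and withdrawalRequestsRootSketch are illustrative names of mine, written from the perspective of a caller importing the ssz package.

// Assumed shape of the Hashable constraint: any element exposing HashTreeRoot.
type hashableSketch interface {
	HashTreeRoot() ([32]byte, error)
}

// Usage sketch mirroring the wrappers above: delegate a per-type slice root to
// the shared generic helper.
func withdrawalRequestsRootSketch(reqs []*enginev1.WithdrawalRequest) ([32]byte, error) {
	const limit = 16 // merkleization limit for withdrawal requests seen in the generated code below
	return ssz.SliceRoot(reqs, limit)
}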

4
go.mod
View File

@@ -65,8 +65,8 @@ require (
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/client_model v0.6.1
github.com/prometheus/prom2json v1.3.0
github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44
github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7
github.com/prysmaticlabs/fastssz v0.0.0-20240620202422-a981b8ef89d3
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e
github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c
github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20230228205207-28762a7b9294
github.com/rs/cors v1.7.0

8
go.sum
View File

@@ -966,11 +966,11 @@ github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3c
github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y=
github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44 h1:c3p3UzV4vFA7xaCDphnDWOjpxcadrQ26l5b+ypsvyxo=
github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44/go.mod h1:MA5zShstUwCQaE9faGHgCGvEWUbG87p4SAXINhmCkvg=
github.com/prysmaticlabs/fastssz v0.0.0-20240620202422-a981b8ef89d3 h1:0LZAwwHnsZFfXm4IK4rzFV4N5IVSKZKLmuBMA4kAlFk=
github.com/prysmaticlabs/fastssz v0.0.0-20240620202422-a981b8ef89d3/go.mod h1:h2OlIZD/M6wFvV3YMZbW16lFgh3Rsye00G44J2cwLyU=
github.com/prysmaticlabs/go-bitfield v0.0.0-20210108222456-8e92c3709aa0/go.mod h1:hCwmef+4qXWjv0jLDbQdWnL0Ol7cS7/lCSS26WR+u6s=
github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 h1:0tVE4tdWQK9ZpYygoV7+vS6QkDvQVySboMVEIxBJmXw=
github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4=
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e h1:ATgOe+abbzfx9kCPeXIW4fiWyDdxlwHw07j8UGhdTd4=
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4=
github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4=
github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs=
github.com/prysmaticlabs/grpc-gateway/v2 v2.3.1-0.20230315201114-09284ba20446 h1:4wctORg/1TkgLgXejv9yOSAm3cDBJxoTzl/RNuZmX28=

View File

@@ -152,7 +152,7 @@
"external/.*": "Third party code",
"rules_go_work-.*": "Third party code",
".*\\.pb.*.go": "Generated code is ok",
".*generated\\.ssz\\.go": "Generated code is ok"
".*\\.ssz\\.go": "Generated code is ok"
}
},
"properpermissions": {
@@ -180,7 +180,7 @@
"external/.*": "Third party code",
"rules_go_work-.*": "Third party code",
".*\\.pb.*.go": "Generated code is ok",
".*generated\\.ssz\\.go": "Generated code is ok",
".*\\.ssz\\.go": "Generated code is ok",
".*_test\\.go": "Tests are ok (for now)",
"tools/analyzers/ineffassign/ineffassign\\.go": "3rd party code with a massive switch statement"
}

View File

@@ -31,6 +31,7 @@ proto_library(
ssz_gen_marshal(
name = "ssz_generated_files",
go_proto = ":go_proto",
out = "engine.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
],

View File

@@ -190,7 +190,7 @@ func (e *ExecutionPayload) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 508 {
if o10 != 508 {
return ssz.ErrInvalidVariableOffset
}
@@ -336,11 +336,7 @@ func (e *ExecutionPayload) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -374,25 +370,13 @@ func (e *ExecutionPayload) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.AppendBytes32(elem)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (1073741824+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 1048576)
} else {
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -594,7 +578,7 @@ func (e *ExecutionPayloadCapella) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 512 {
if o10 != 512 {
return ssz.ErrInvalidVariableOffset
}
@@ -766,11 +750,7 @@ func (e *ExecutionPayloadCapella) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -804,18 +784,10 @@ func (e *ExecutionPayloadCapella) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.AppendBytes32(elem)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (1073741824+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 1048576)
} else {
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
// Field (14) 'Withdrawals'
@@ -831,18 +803,10 @@ func (e *ExecutionPayloadCapella) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1050,7 +1014,7 @@ func (e *ExecutionPayloadDeneb) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 528 {
if o10 != 528 {
return ssz.ErrInvalidVariableOffset
}
@@ -1228,11 +1192,7 @@ func (e *ExecutionPayloadDeneb) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -1266,18 +1226,10 @@ func (e *ExecutionPayloadDeneb) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.AppendBytes32(elem)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (1073741824+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 1048576)
} else {
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
// Field (14) 'Withdrawals'
@@ -1293,11 +1245,7 @@ func (e *ExecutionPayloadDeneb) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
// Field (15) 'BlobGasUsed'
@@ -1306,11 +1254,7 @@ func (e *ExecutionPayloadDeneb) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (16) 'ExcessBlobGas'
hh.PutUint64(e.ExcessBlobGas)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1548,7 +1492,7 @@ func (e *ExecutionPayloadElectra) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 536 {
if o10 != 536 {
return ssz.ErrInvalidVariableOffset
}
@@ -1778,11 +1722,7 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -1816,18 +1756,10 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.AppendBytes32(elem)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (1073741824+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 1048576)
} else {
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
// Field (14) 'Withdrawals'
@@ -1843,11 +1775,7 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
// Field (15) 'BlobGasUsed'
@@ -1869,11 +1797,7 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 8192)
} else {
hh.MerkleizeWithMixin(subIndx, num, 8192)
}
hh.MerkleizeWithMixin(subIndx, num, 8192)
}
// Field (18) 'WithdrawalRequests'
@@ -1889,18 +1813,10 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2067,7 +1983,7 @@ func (e *ExecutionPayloadHeader) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 536 {
if o10 != 536 {
return ssz.ErrInvalidVariableOffset
}
@@ -2185,11 +2101,7 @@ func (e *ExecutionPayloadHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -2213,11 +2125,7 @@ func (e *ExecutionPayloadHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(e.TransactionsRoot)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2391,7 +2299,7 @@ func (e *ExecutionPayloadHeaderCapella) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 568 {
if o10 != 568 {
return ssz.ErrInvalidVariableOffset
}
@@ -2515,11 +2423,7 @@ func (e *ExecutionPayloadHeaderCapella) HashTreeRootWith(hh *ssz.Hasher) (err er
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -2550,11 +2454,7 @@ func (e *ExecutionPayloadHeaderCapella) HashTreeRootWith(hh *ssz.Hasher) (err er
}
hh.PutBytes(e.WithdrawalsRoot)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2734,7 +2634,7 @@ func (e *ExecutionPayloadHeaderDeneb) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 584 {
if o10 != 584 {
return ssz.ErrInvalidVariableOffset
}
@@ -2864,11 +2764,7 @@ func (e *ExecutionPayloadHeaderDeneb) HashTreeRootWith(hh *ssz.Hasher) (err erro
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -2905,11 +2801,7 @@ func (e *ExecutionPayloadHeaderDeneb) HashTreeRootWith(hh *ssz.Hasher) (err erro
// Field (16) 'ExcessBlobGas'
hh.PutUint64(e.ExcessBlobGas)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -3103,7 +2995,7 @@ func (e *ExecutionPayloadHeaderElectra) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 648 {
if o10 != 648 {
return ssz.ErrInvalidVariableOffset
}
@@ -3245,11 +3137,7 @@ func (e *ExecutionPayloadHeaderElectra) HashTreeRootWith(hh *ssz.Hasher) (err er
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -3300,11 +3188,7 @@ func (e *ExecutionPayloadHeaderElectra) HashTreeRootWith(hh *ssz.Hasher) (err er
}
hh.PutBytes(e.WithdrawalRequestsRoot)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -3393,11 +3277,7 @@ func (w *Withdrawal) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (3) 'Amount'
hh.PutUint64(w.Amount)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -3481,7 +3361,7 @@ func (b *BlobsBundle) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o0 < 12 {
if o0 != 12 {
return ssz.ErrInvalidVariableOffset
}
@@ -3586,11 +3466,7 @@ func (b *BlobsBundle) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
numItems := uint64(len(b.KzgCommitments))
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, numItems, 4096)
} else {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (1) 'Proofs'
@@ -3609,11 +3485,7 @@ func (b *BlobsBundle) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
numItems := uint64(len(b.Proofs))
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, numItems, 4096)
} else {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (2) 'Blobs'
@@ -3632,18 +3504,10 @@ func (b *BlobsBundle) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
numItems := uint64(len(b.Blobs))
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, numItems, 4096)
} else {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -3734,11 +3598,7 @@ func (w *WithdrawalRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (2) 'Amount'
hh.PutUint64(w.Amount)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -3858,10 +3718,6 @@ func (d *DepositRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (4) 'Index'
hh.PutUint64(d.Index)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}


@@ -35,6 +35,7 @@ load("//tools:ssz.bzl", "SSZ_DEPS", "ssz_gen_marshal")
ssz_gen_marshal(
name = "ssz_generated_files",
go_proto = ":go_proto",
out = "gateway.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",


@@ -1,5 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: d06a72227c2f5e350916cce3e89f4e855135a2a22f6ea263dedc68fa506c1ba7
// Hash: 2874e1dadeb47411763f48fe31e5daaa91ac663e796933d9a508c2e7be94fa5e
package v1
import (
@@ -62,7 +62,7 @@ func (a *Attestation) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o0 < 228 {
if o0 != 228 {
return ssz.ErrInvalidVariableOffset
}
@@ -132,11 +132,7 @@ func (a *Attestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(a.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -194,7 +190,7 @@ func (a *AggregateAttestationAndProof) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o1 < 108 {
if o1 != 108 {
return ssz.ErrInvalidVariableOffset
}
@@ -254,11 +250,7 @@ func (a *AggregateAttestationAndProof) HashTreeRootWith(hh *ssz.Hasher) (err err
}
hh.PutBytes(a.SelectionProof)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -310,7 +302,7 @@ func (s *SignedAggregateAttestationAndProof) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o0 < 100 {
if o0 != 100 {
return ssz.ErrInvalidVariableOffset
}
@@ -367,11 +359,7 @@ func (s *SignedAggregateAttestationAndProof) HashTreeRootWith(hh *ssz.Hasher) (e
}
hh.PutBytes(s.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -493,11 +481,7 @@ func (a *AttestationData) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -568,11 +552,7 @@ func (c *Checkpoint) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(c.Root)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -655,7 +635,7 @@ func (b *BeaconBlock) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o4 < 84 {
if o4 != 84 {
return ssz.ErrInvalidVariableOffset
}
@@ -719,11 +699,7 @@ func (b *BeaconBlock) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -775,7 +751,7 @@ func (s *SignedBeaconBlock) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o0 < 100 {
if o0 != 100 {
return ssz.ErrInvalidVariableOffset
}
@@ -832,11 +808,7 @@ func (s *SignedBeaconBlock) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(s.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1006,7 +978,7 @@ func (b *BeaconBlockBody) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o3 < 220 {
if o3 != 220 {
return ssz.ErrInvalidVariableOffset
}
@@ -1199,11 +1171,7 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
// Field (4) 'AttesterSlashings'
@@ -1219,11 +1187,7 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 2)
} else {
hh.MerkleizeWithMixin(subIndx, num, 2)
}
hh.MerkleizeWithMixin(subIndx, num, 2)
}
// Field (5) 'Attestations'
@@ -1239,11 +1203,7 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 128)
} else {
hh.MerkleizeWithMixin(subIndx, num, 128)
}
hh.MerkleizeWithMixin(subIndx, num, 128)
}
// Field (6) 'Deposits'
@@ -1259,11 +1219,7 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
// Field (7) 'VoluntaryExits'
@@ -1279,18 +1235,10 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1374,11 +1322,7 @@ func (p *ProposerSlashing) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1435,7 +1379,7 @@ func (a *AttesterSlashing) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o0 < 8 {
if o0 != 8 {
return ssz.ErrInvalidVariableOffset
}
@@ -1506,11 +1450,7 @@ func (a *AttesterSlashing) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1604,12 +1544,7 @@ func (d *Deposit) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.Append(i)
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(subIndx)
} else {
hh.Merkleize(subIndx)
}
hh.Merkleize(subIndx)
}
// Field (1) 'Data'
@@ -1617,11 +1552,7 @@ func (d *Deposit) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1681,11 +1612,7 @@ func (v *VoluntaryExit) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (1) 'ValidatorIndex'
hh.PutUint64(uint64(v.ValidatorIndex))
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1768,11 +1695,7 @@ func (s *SignedVoluntaryExit) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(s.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1863,11 +1786,7 @@ func (e *Eth1Data) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(e.BlockHash)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1987,11 +1906,7 @@ func (b *BeaconBlockHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(b.BodyRoot)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2074,11 +1989,7 @@ func (s *SignedBeaconBlockHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(s.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2139,7 +2050,7 @@ func (i *IndexedAttestation) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o0 < 228 {
if o0 != 228 {
return ssz.ErrInvalidVariableOffset
}
@@ -2204,11 +2115,7 @@ func (i *IndexedAttestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.FillUpTo32()
numItems := uint64(len(i.AttestingIndices))
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, numItems, ssz.CalculateLimit(2048, numItems, 8))
} else {
hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(2048, numItems, 8))
}
hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(2048, numItems, 8))
}
// Field (1) 'Data'
@@ -2223,11 +2130,7 @@ func (i *IndexedAttestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(i.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2309,11 +2212,7 @@ func (s *SyncAggregate) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(s.SyncCommitteeSignature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2424,11 +2323,7 @@ func (d *Deposit_Data) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(d.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2564,10 +2459,6 @@ func (v *Validator) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (7) 'WithdrawableEpoch'
hh.PutUint64(uint64(v.WithdrawableEpoch))
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}


@@ -31,6 +31,7 @@ load("//tools:ssz.bzl", "SSZ_DEPS", "ssz_gen_marshal")
ssz_gen_marshal(
name = "ssz_generated_files",
go_proto = ":go_proto",
out = "grpc.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",

File diff suppressed because it is too large

@@ -45,111 +45,211 @@ proto_library(
##############################################################################
# Go
##############################################################################
ssz_phase0_objs = [
"AggregateAttestationAndProof",
"Attestation",
"AttestationData",
"AttesterSlashing",
"BeaconBlock",
"BeaconBlockHeader",
"BeaconState",
"Checkpoint",
"Deposit",
"Deposit_Data",
"DepositMessage",
"ENRForkID",
"Eth1Data",
"Fork",
"ForkData",
"HistoricalBatch",
"IndexedAttestation",
"PowBlock",
"ProposerSlashing",
"SignedAggregateAttestationAndProof",
"SignedBeaconBlock",
"SignedBeaconBlockHeader",
"SignedVoluntaryExit",
"SigningData",
"Status",
"Status",
"Validator",
"VoluntaryExit",
]
ssz_altair_objs = [
"BeaconBlockAltair",
"BeaconBlockBodyAltair",
"BeaconStateAltair",
"ContributionAndProof",
"SignedBeaconBlockAltair",
"SignedContributionAndProof",
"SyncAggregate",
"SyncAggregate",
"SyncAggregatorSelectionData",
"SyncCommittee",
"SyncCommitteeContribution",
"SyncCommitteeMessage",
]
ssz_bellatrix_objs = [
"BeaconBlockBellatrix",
"BeaconBlockBodyBellatrix",
"BeaconStateBellatrix",
"BlindedBeaconBlockBellatrix",
"BlindedBeaconBlockBodyBellatrix",
"SignedBeaconBlockBellatrix",
"SignedBlindedBeaconBlockBellatrix",
]
ssz_capella_objs = [
"BLSToExecutionChange",
"BeaconBlockBodyCapella",
"BeaconBlockCapella",
"BeaconStateCapella",
"BlindedBeaconBlockBodyCapella",
"BlindedBeaconBlockCapella",
"BuilderBidCapella",
"HistoricalSummary",
"SignedBLSToExecutionChange",
"SignedBeaconBlockCapella",
"SignedBlindedBeaconBlockCapella",
"Withdrawal",
]
ssz_deneb_objs = [
"BeaconBlockBodyDeneb",
"BeaconBlockContentsDeneb",
"BeaconBlockDeneb",
"BeaconStateDeneb",
"BlindedBeaconBlockBodyDeneb",
"BlindedBeaconBlockDeneb",
"BlobIdentifier",
"BlobSidecar",
"BlobSidecars",
"BuilderBidDeneb",
"SignedBeaconBlockContentsDeneb",
"SignedBeaconBlockDeneb",
"SignedBlindedBeaconBlockDeneb",
]
ssz_electra_objs = [
"AggregateAttestationAndProofElectra",
"AttestationElectra",
"AttesterSlashingElectra",
"BeaconBlockElectra",
"BeaconBlockElectra",
"BeaconStateElectra",
"BlindedBeaconBlockBodyElectra",
"BlindedBeaconBlockElectra",
"Consolidation",
"IndexedAttestationElectra",
"PendingBalanceDeposit",
"PendingBalanceDeposits",
"PendingConsolidation",
"PendingPartialWithdrawal",
"SignedAggregateAttestationAndProofElectra",
"SignedBeaconBlockElectra",
"SignedBlindedBeaconBlockElectra",
"SignedConsolidation",
]
ssz_gen_marshal(
name = "ssz_generated_files",
name = "ssz_generated_phase0",
go_proto = ":go_proto",
out = "phase0.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = ssz_phase0_objs,
)
ssz_gen_marshal(
name = "ssz_generated_altair",
go_proto = ":go_proto",
out = "altair.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = ssz_altair_objs,
exclude_objs = ssz_phase0_objs,
)
ssz_gen_marshal(
name = "ssz_generated_bellatrix",
go_proto = ":go_proto",
out = "bellatrix.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = ssz_bellatrix_objs,
exclude_objs = ssz_phase0_objs + ssz_altair_objs,
)
ssz_gen_marshal(
name = "ssz_generated_capella",
go_proto = ":go_proto",
out = "capella.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = ssz_capella_objs,
exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs,
)
ssz_gen_marshal(
name = "ssz_generated_deneb",
go_proto = ":go_proto",
out = "deneb.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = ssz_deneb_objs,
exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs + ssz_capella_objs,
)
ssz_gen_marshal(
name = "ssz_generated_electra",
go_proto = ":go_proto",
out = "electra.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = ssz_electra_objs,
exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs + ssz_capella_objs + ssz_deneb_objs,
)
ssz_gen_marshal(
name = "ssz_generated_non_core",
go_proto = ":go_proto",
out = "non-core.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = [
"BeaconBlockAltair",
"BeaconBlockBodyAltair",
"SignedBeaconBlockAltair",
"BeaconBlockBellatrix",
"BeaconBlockBodyBellatrix",
"SignedBeaconBlockBellatrix",
"SignedBlindedBeaconBlockBellatrix",
"BlindedBeaconBlockBellatrix",
"BlindedBeaconBlockBodyBellatrix",
"BeaconBlockCapella",
"BeaconBlockBodyCapella",
"SignedBeaconBlockCapella",
"BeaconBlockDeneb",
"BeaconBlockBodyDeneb",
"SignedBeaconBlockDeneb",
"BeaconBlockElectra",
"BeaconBlockElectra",
"SignedBeaconBlockElectra",
"SignedBlindedBeaconBlockCapella",
"BlindedBeaconBlockCapella",
"BlindedBeaconBlockBodyCapella",
"SignedBlindedBeaconBlockDeneb",
"BeaconBlockContentsDeneb",
"SignedBeaconBlockContentsDeneb",
"BlindedBeaconBlockDeneb",
"BlindedBeaconBlockBodyDeneb",
"SignedBlindedBeaconBlockElectra",
"BlindedBeaconBlockElectra",
"BlindedBeaconBlockBodyElectra",
"SyncAggregate",
"SyncCommitteeMessage",
"SyncCommitteeContribution",
"ContributionAndProof",
"SignedContributionAndProof",
"BeaconBlocksByRangeRequest",
"BlobSidecarsByRangeRequest",
"ENRForkID",
"MetaDataV0",
"MetaDataV1",
"Status",
"AggregateAttestationAndProof",
"AggregateAttestationAndProofElectra",
"Attestation",
"AttestationElectra",
"AttestationData",
"AttesterSlashing",
"AttesterSlashingElectra",
"BeaconBlock",
"BeaconBlockHeader",
"Checkpoint",
"Deposit",
"Eth1Data",
"IndexedAttestation",
"IndexedAttestationElectra",
"ProposerSlashing",
"SignedAggregateAttestationAndProof",
"SignedAggregateAttestationAndProofElectra",
"SignedBeaconBlock",
"SignedBeaconBlockHeader",
"SignedVoluntaryExit",
"Validator",
"VoluntaryExit",
"ContributionAndProof",
"SignedContributionAndProof",
"DepositMessage",
"Fork",
"ForkData",
"HistoricalBatch",
"Status",
"BeaconState",
"BeaconStateAltair",
"BeaconStateBellatrix",
"BeaconStateCapella",
"BeaconStateDeneb",
"BeaconStateElectra",
"SigningData",
"SyncCommittee",
"SyncAggregatorSelectionData",
"PowBlock",
"SignedValidatorRegistrationV1",
"ValidatorRegistrationV1",
"Withdrawal",
"BLSToExecutionChange",
"SignedBLSToExecutionChange",
"BuilderBid",
"BuilderBidCapella",
"BuilderBidDeneb",
"BlobSidecar",
"BlobSidecars",
"BlobIdentifier",
"DepositSnapshot",
"PendingBalanceDeposit",
"PendingPartialWithdrawal",
"Consolidation",
"SignedConsolidation",
"PendingConsolidation",
],
)
@@ -212,7 +312,13 @@ go_library(
"eip_7251.go",
"sync_committee_mainnet.go",
"sync_committee_minimal.go", # keep
":ssz_generated_files", # keep
":ssz_generated_non_core", # keep
":ssz_generated_phase0", # keep
":ssz_generated_altair", # keep
":ssz_generated_bellatrix", # keep
":ssz_generated_capella", # keep
":ssz_generated_deneb", # keep
":ssz_generated_electra", # keep
],
embed = [
":go_grpc_gateway_library",

File diff suppressed because it is too large

@@ -2,10 +2,14 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["attestation_utils.go"],
srcs = [
"attestation_utils.go",
"id.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -20,7 +24,10 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["attestation_utils_test.go"],
srcs = [
"attestation_utils_test.go",
"id_test.go",
],
deps = [
":go_default_library",
"//config/fieldparams:go_default_library",
@@ -29,6 +36,7 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)


@@ -12,6 +12,7 @@ go_library(
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation/aggregation:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",


@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation/aggregation"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
// MaxCoverAttestationAggregation relies on Maximum Coverage greedy algorithm for aggregation.
@@ -171,11 +172,21 @@ func aggregateAttestations(atts []ethpb.Att, keys []int, coverage *bitfield.Bitl
}
}
// Put aggregated attestation at a position of the first selected attestation.
atts[targetIdx] = &ethpb.Attestation{
// Append size byte, which will be unnecessary on switch to Bitlist64.
AggregationBits: coverage.ToBitlist(),
Data: data,
Signature: aggregateSignatures(signs).Marshal(),
if atts[0].Version() == version.Phase0 {
atts[targetIdx] = &ethpb.Attestation{
// Append size byte, which will be unnecessary on switch to Bitlist64.
AggregationBits: coverage.ToBitlist(),
Data: data,
Signature: aggregateSignatures(signs).Marshal(),
}
} else {
atts[targetIdx] = &ethpb.AttestationElectra{
// Append size byte, which will be unnecessary on switch to Bitlist64.
AggregationBits: coverage.ToBitlist(),
CommitteeBits: atts[0].CommitteeBitsVal().Bytes(),
Data: data,
Signature: aggregateSignatures(signs).Marshal(),
}
}
return
}
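MaxCoverAttestationAggregation, named in the comment above, greedily picks the candidate attestation whose aggregation bits cover the most not-yet-covered validators and merges it into the running aggregate; the new hunk then packs the result into either an Attestation or an AttestationElectra depending on the version of the inputs. A self-contained sketch of the greedy selection over plain bool slices (illustration only; the real code operates on go-bitfield bitlists and aggregates BLS signatures as shown above):

package main

import "fmt"

// newCoverage counts bits set in cand that are not yet set in covered.
func newCoverage(covered, cand []bool) int {
    n := 0
    for i := range cand {
        if cand[i] && !covered[i] {
            n++
        }
    }
    return n
}

// greedyMaxCover repeatedly selects the candidate adding the most new bits
// until no candidate adds anything, returning the indices it picked.
func greedyMaxCover(cands [][]bool, size int) []int {
    covered := make([]bool, size)
    var picked []int
    for {
        best, bestGain := -1, 0
        for i, c := range cands {
            if g := newCoverage(covered, c); g > bestGain {
                best, bestGain = i, g
            }
        }
        if best == -1 {
            return picked
        }
        picked = append(picked, best)
        for i, b := range cands[best] {
            if b {
                covered[i] = true
            }
        }
    }
}

func main() {
    cands := [][]bool{
        {true, true, false, false},
        {false, true, true, false},
        {false, false, false, true},
    }
    fmt.Println(greedyMaxCover(cands, 4)) // [0 1 2]
}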


@@ -0,0 +1,71 @@
package attestation
import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
// IdSource represents the part of attestation that will be used to generate the Id.
type IdSource uint8
const (
// Full generates the Id from the whole attestation.
Full IdSource = iota
// Data generates the Id from the tuple (slot, committee index, beacon block root, source, target).
Data
)
// Id represents an attestation ID. Its uniqueness depends on the IdSource provided when constructing the Id.
type Id [33]byte
// NewId --
func NewId(att ethpb.Att, source IdSource) (Id, error) {
if err := helpers.ValidateNilAttestation(att); err != nil {
return Id{}, err
}
if att.Version() < 0 || att.Version() > 255 {
return Id{}, errors.New("attestation version must be between 0 and 255")
}
var id Id
id[0] = byte(att.Version())
switch source {
case Full:
h, err := att.HashTreeRoot()
if err != nil {
return Id{}, err
}
copy(id[1:], h[:])
return id, nil
case Data:
data := att.GetData()
if att.Version() >= version.Electra {
committeeIndices := att.CommitteeBitsVal().BitIndices()
if len(committeeIndices) != 1 {
return Id{}, fmt.Errorf("%d committee bits are set instead of 1", len(committeeIndices))
}
dataCopy := ethpb.CopyAttestationData(att.GetData())
dataCopy.CommitteeIndex = primitives.CommitteeIndex(committeeIndices[0])
data = dataCopy
}
h, err := data.HashTreeRoot()
if err != nil {
return Id{}, err
}
copy(id[1:], h[:])
return id, nil
default:
return Id{}, errors.New("invalid source requested")
}
}
// String --
func (id Id) String() string {
return string(id[:])
}
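The Id type introduced above gives attestation handling a fixed-size, version-aware key: byte 0 carries the attestation version and the remaining 32 bytes are the hash tree root of either the whole attestation (Full) or its data (Data, with the single Electra committee index folded into a copy of the data). Because Id is a plain [33]byte it is comparable and can key maps directly. A hedged usage sketch, assuming the import paths used elsewhere in this changeset; the package and function here are hypothetical, for illustration only:

package pool // hypothetical package, not part of this changeset

import (
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)

// groupByDataID buckets attestations that share the same data (and, for
// Electra, the same single committee), which is the grouping aggregation needs.
func groupByDataID(atts []ethpb.Att) (map[attestation.Id][]ethpb.Att, error) {
    groups := make(map[attestation.Id][]ethpb.Att)
    for _, att := range atts {
        id, err := attestation.NewId(att, attestation.Data)
        if err != nil {
            return nil, err
        }
        groups[id] = append(groups[id], att)
    }
    return groups, nil
}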


@@ -0,0 +1,63 @@
package attestation_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestNewId(t *testing.T) {
t.Run("full source", func(t *testing.T) {
att := util.HydrateAttestation(&ethpb.Attestation{})
_, err := attestation.NewId(att, attestation.Full)
assert.NoError(t, err)
})
t.Run("data source Phase 0", func(t *testing.T) {
att := util.HydrateAttestation(&ethpb.Attestation{})
_, err := attestation.NewId(att, attestation.Data)
assert.NoError(t, err)
})
t.Run("data source Electra", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
att := util.HydrateAttestationElectra(&ethpb.AttestationElectra{CommitteeBits: cb})
_, err := attestation.NewId(att, attestation.Data)
assert.NoError(t, err)
})
t.Run("ID is different between versions", func(t *testing.T) {
phase0Att := util.HydrateAttestation(&ethpb.Attestation{})
phase0Id, err := attestation.NewId(phase0Att, attestation.Data)
require.NoError(t, err)
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true) // setting committee bit 0 for Electra corresponds to attestation data's committee index 0 for Phase 0
electraAtt := util.HydrateAttestationElectra(&ethpb.AttestationElectra{CommitteeBits: cb})
electraId, err := attestation.NewId(electraAtt, attestation.Data)
require.NoError(t, err)
assert.NotEqual(t, phase0Id, electraId)
})
t.Run("invalid source", func(t *testing.T) {
att := util.HydrateAttestation(&ethpb.Attestation{})
_, err := attestation.NewId(att, 123)
assert.ErrorContains(t, "invalid source requested", err)
})
t.Run("data source Electra - 0 bits set", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
att := util.HydrateAttestationElectra(&ethpb.AttestationElectra{CommitteeBits: cb})
_, err := attestation.NewId(att, attestation.Data)
assert.ErrorContains(t, "0 committee bits are set", err)
})
t.Run("data source Electra - multiple bits set", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
cb.SetBitAt(1, true)
att := util.HydrateAttestationElectra(&ethpb.AttestationElectra{CommitteeBits: cb})
_, err := attestation.NewId(att, attestation.Data)
assert.ErrorContains(t, "2 committee bits are set", err)
})
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,780 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: 9da8a498bd074553137a73197dadcae4d3b4239484f64bab4f0a734dce528d24
package eth
import (
ssz "github.com/prysmaticlabs/fastssz"
github_com_prysmaticlabs_prysm_v5_consensus_types_primitives "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)
// MarshalSSZ ssz marshals the ValidatorRegistrationV1 object
func (v *ValidatorRegistrationV1) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(v)
}
// MarshalSSZTo ssz marshals the ValidatorRegistrationV1 object to a target array
func (v *ValidatorRegistrationV1) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'FeeRecipient'
if size := len(v.FeeRecipient); size != 20 {
err = ssz.ErrBytesLengthFn("--.FeeRecipient", size, 20)
return
}
dst = append(dst, v.FeeRecipient...)
// Field (1) 'GasLimit'
dst = ssz.MarshalUint64(dst, v.GasLimit)
// Field (2) 'Timestamp'
dst = ssz.MarshalUint64(dst, v.Timestamp)
// Field (3) 'Pubkey'
if size := len(v.Pubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
return
}
dst = append(dst, v.Pubkey...)
return
}
// UnmarshalSSZ ssz unmarshals the ValidatorRegistrationV1 object
func (v *ValidatorRegistrationV1) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 84 {
return ssz.ErrSize
}
// Field (0) 'FeeRecipient'
if cap(v.FeeRecipient) == 0 {
v.FeeRecipient = make([]byte, 0, len(buf[0:20]))
}
v.FeeRecipient = append(v.FeeRecipient, buf[0:20]...)
// Field (1) 'GasLimit'
v.GasLimit = ssz.UnmarshallUint64(buf[20:28])
// Field (2) 'Timestamp'
v.Timestamp = ssz.UnmarshallUint64(buf[28:36])
// Field (3) 'Pubkey'
if cap(v.Pubkey) == 0 {
v.Pubkey = make([]byte, 0, len(buf[36:84]))
}
v.Pubkey = append(v.Pubkey, buf[36:84]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ValidatorRegistrationV1 object
func (v *ValidatorRegistrationV1) SizeSSZ() (size int) {
size = 84
return
}
// HashTreeRoot ssz hashes the ValidatorRegistrationV1 object
func (v *ValidatorRegistrationV1) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(v)
}
// HashTreeRootWith ssz hashes the ValidatorRegistrationV1 object with a hasher
func (v *ValidatorRegistrationV1) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'FeeRecipient'
if size := len(v.FeeRecipient); size != 20 {
err = ssz.ErrBytesLengthFn("--.FeeRecipient", size, 20)
return
}
hh.PutBytes(v.FeeRecipient)
// Field (1) 'GasLimit'
hh.PutUint64(v.GasLimit)
// Field (2) 'Timestamp'
hh.PutUint64(v.Timestamp)
// Field (3) 'Pubkey'
if size := len(v.Pubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
return
}
hh.PutBytes(v.Pubkey)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the SignedValidatorRegistrationV1 object
func (s *SignedValidatorRegistrationV1) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(s)
}
// MarshalSSZTo ssz marshals the SignedValidatorRegistrationV1 object to a target array
func (s *SignedValidatorRegistrationV1) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'Message'
if s.Message == nil {
s.Message = new(ValidatorRegistrationV1)
}
if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
return
}
// Field (1) 'Signature'
if size := len(s.Signature); size != 96 {
err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
return
}
dst = append(dst, s.Signature...)
return
}
// UnmarshalSSZ ssz unmarshals the SignedValidatorRegistrationV1 object
func (s *SignedValidatorRegistrationV1) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 180 {
return ssz.ErrSize
}
// Field (0) 'Message'
if s.Message == nil {
s.Message = new(ValidatorRegistrationV1)
}
if err = s.Message.UnmarshalSSZ(buf[0:84]); err != nil {
return err
}
// Field (1) 'Signature'
if cap(s.Signature) == 0 {
s.Signature = make([]byte, 0, len(buf[84:180]))
}
s.Signature = append(s.Signature, buf[84:180]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the SignedValidatorRegistrationV1 object
func (s *SignedValidatorRegistrationV1) SizeSSZ() (size int) {
size = 180
return
}
// HashTreeRoot ssz hashes the SignedValidatorRegistrationV1 object
func (s *SignedValidatorRegistrationV1) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(s)
}
// HashTreeRootWith ssz hashes the SignedValidatorRegistrationV1 object with a hasher
func (s *SignedValidatorRegistrationV1) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Message'
if err = s.Message.HashTreeRootWith(hh); err != nil {
return
}
// Field (1) 'Signature'
if size := len(s.Signature); size != 96 {
err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
return
}
hh.PutBytes(s.Signature)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the BuilderBid object
func (b *BuilderBid) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(b)
}
// MarshalSSZTo ssz marshals the BuilderBid object to a target array
func (b *BuilderBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(84)
// Offset (0) 'Header'
dst = ssz.WriteOffset(dst, offset)
if b.Header == nil {
b.Header = new(v1.ExecutionPayloadHeader)
}
offset += b.Header.SizeSSZ()
// Field (1) 'Value'
if size := len(b.Value); size != 32 {
err = ssz.ErrBytesLengthFn("--.Value", size, 32)
return
}
dst = append(dst, b.Value...)
// Field (2) 'Pubkey'
if size := len(b.Pubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
return
}
dst = append(dst, b.Pubkey...)
// Field (0) 'Header'
if dst, err = b.Header.MarshalSSZTo(dst); err != nil {
return
}
return
}
// UnmarshalSSZ ssz unmarshals the BuilderBid object
func (b *BuilderBid) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 84 {
return ssz.ErrSize
}
tail := buf
var o0 uint64
// Offset (0) 'Header'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 84 {
return ssz.ErrInvalidVariableOffset
}
// Field (1) 'Value'
if cap(b.Value) == 0 {
b.Value = make([]byte, 0, len(buf[4:36]))
}
b.Value = append(b.Value, buf[4:36]...)
// Field (2) 'Pubkey'
if cap(b.Pubkey) == 0 {
b.Pubkey = make([]byte, 0, len(buf[36:84]))
}
b.Pubkey = append(b.Pubkey, buf[36:84]...)
// Field (0) 'Header'
{
buf = tail[o0:]
if b.Header == nil {
b.Header = new(v1.ExecutionPayloadHeader)
}
if err = b.Header.UnmarshalSSZ(buf); err != nil {
return err
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the BuilderBid object
func (b *BuilderBid) SizeSSZ() (size int) {
size = 84
// Field (0) 'Header'
if b.Header == nil {
b.Header = new(v1.ExecutionPayloadHeader)
}
size += b.Header.SizeSSZ()
return
}
// HashTreeRoot ssz hashes the BuilderBid object
func (b *BuilderBid) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(b)
}
// HashTreeRootWith ssz hashes the BuilderBid object with a hasher
func (b *BuilderBid) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Header'
if err = b.Header.HashTreeRootWith(hh); err != nil {
return
}
// Field (1) 'Value'
if size := len(b.Value); size != 32 {
err = ssz.ErrBytesLengthFn("--.Value", size, 32)
return
}
hh.PutBytes(b.Value)
// Field (2) 'Pubkey'
if size := len(b.Pubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
return
}
hh.PutBytes(b.Pubkey)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the BeaconBlocksByRangeRequest object
func (b *BeaconBlocksByRangeRequest) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(b)
}
// MarshalSSZTo ssz marshals the BeaconBlocksByRangeRequest object to a target array
func (b *BeaconBlocksByRangeRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'StartSlot'
dst = ssz.MarshalUint64(dst, uint64(b.StartSlot))
// Field (1) 'Count'
dst = ssz.MarshalUint64(dst, b.Count)
// Field (2) 'Step'
dst = ssz.MarshalUint64(dst, b.Step)
return
}
// UnmarshalSSZ ssz unmarshals the BeaconBlocksByRangeRequest object
func (b *BeaconBlocksByRangeRequest) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 24 {
return ssz.ErrSize
}
// Field (0) 'StartSlot'
b.StartSlot = github_com_prysmaticlabs_prysm_v5_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[0:8]))
// Field (1) 'Count'
b.Count = ssz.UnmarshallUint64(buf[8:16])
// Field (2) 'Step'
b.Step = ssz.UnmarshallUint64(buf[16:24])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlocksByRangeRequest object
func (b *BeaconBlocksByRangeRequest) SizeSSZ() (size int) {
size = 24
return
}
// HashTreeRoot ssz hashes the BeaconBlocksByRangeRequest object
func (b *BeaconBlocksByRangeRequest) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(b)
}
// HashTreeRootWith ssz hashes the BeaconBlocksByRangeRequest object with a hasher
func (b *BeaconBlocksByRangeRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'StartSlot'
hh.PutUint64(uint64(b.StartSlot))
// Field (1) 'Count'
hh.PutUint64(b.Count)
// Field (2) 'Step'
hh.PutUint64(b.Step)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the MetaDataV0 object
func (m *MetaDataV0) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(m)
}
// MarshalSSZTo ssz marshals the MetaDataV0 object to a target array
func (m *MetaDataV0) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'SeqNumber'
dst = ssz.MarshalUint64(dst, m.SeqNumber)
// Field (1) 'Attnets'
if size := len(m.Attnets); size != 8 {
err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
return
}
dst = append(dst, m.Attnets...)
return
}
// UnmarshalSSZ ssz unmarshals the MetaDataV0 object
func (m *MetaDataV0) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 16 {
return ssz.ErrSize
}
// Field (0) 'SeqNumber'
m.SeqNumber = ssz.UnmarshallUint64(buf[0:8])
// Field (1) 'Attnets'
if cap(m.Attnets) == 0 {
m.Attnets = make([]byte, 0, len(buf[8:16]))
}
m.Attnets = append(m.Attnets, buf[8:16]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the MetaDataV0 object
func (m *MetaDataV0) SizeSSZ() (size int) {
size = 16
return
}
// HashTreeRoot ssz hashes the MetaDataV0 object
func (m *MetaDataV0) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(m)
}
// HashTreeRootWith ssz hashes the MetaDataV0 object with a hasher
func (m *MetaDataV0) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'SeqNumber'
hh.PutUint64(m.SeqNumber)
// Field (1) 'Attnets'
if size := len(m.Attnets); size != 8 {
err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
return
}
hh.PutBytes(m.Attnets)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the MetaDataV1 object
func (m *MetaDataV1) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(m)
}
// MarshalSSZTo ssz marshals the MetaDataV1 object to a target array
func (m *MetaDataV1) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'SeqNumber'
dst = ssz.MarshalUint64(dst, m.SeqNumber)
// Field (1) 'Attnets'
if size := len(m.Attnets); size != 8 {
err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
return
}
dst = append(dst, m.Attnets...)
// Field (2) 'Syncnets'
if size := len(m.Syncnets); size != 1 {
err = ssz.ErrBytesLengthFn("--.Syncnets", size, 1)
return
}
dst = append(dst, m.Syncnets...)
return
}
// UnmarshalSSZ ssz unmarshals the MetaDataV1 object
func (m *MetaDataV1) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 17 {
return ssz.ErrSize
}
// Field (0) 'SeqNumber'
m.SeqNumber = ssz.UnmarshallUint64(buf[0:8])
// Field (1) 'Attnets'
if cap(m.Attnets) == 0 {
m.Attnets = make([]byte, 0, len(buf[8:16]))
}
m.Attnets = append(m.Attnets, buf[8:16]...)
// Field (2) 'Syncnets'
if cap(m.Syncnets) == 0 {
m.Syncnets = make([]byte, 0, len(buf[16:17]))
}
m.Syncnets = append(m.Syncnets, buf[16:17]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the MetaDataV1 object
func (m *MetaDataV1) SizeSSZ() (size int) {
size = 17
return
}
// HashTreeRoot ssz hashes the MetaDataV1 object
func (m *MetaDataV1) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(m)
}
// HashTreeRootWith ssz hashes the MetaDataV1 object with a hasher
func (m *MetaDataV1) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'SeqNumber'
hh.PutUint64(m.SeqNumber)
// Field (1) 'Attnets'
if size := len(m.Attnets); size != 8 {
err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
return
}
hh.PutBytes(m.Attnets)
// Field (2) 'Syncnets'
if size := len(m.Syncnets); size != 1 {
err = ssz.ErrBytesLengthFn("--.Syncnets", size, 1)
return
}
hh.PutBytes(m.Syncnets)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the BlobSidecarsByRangeRequest object
func (b *BlobSidecarsByRangeRequest) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(b)
}
// MarshalSSZTo ssz marshals the BlobSidecarsByRangeRequest object to a target array
func (b *BlobSidecarsByRangeRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'StartSlot'
dst = ssz.MarshalUint64(dst, uint64(b.StartSlot))
// Field (1) 'Count'
dst = ssz.MarshalUint64(dst, b.Count)
return
}
// UnmarshalSSZ ssz unmarshals the BlobSidecarsByRangeRequest object
func (b *BlobSidecarsByRangeRequest) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 16 {
return ssz.ErrSize
}
// Field (0) 'StartSlot'
b.StartSlot = github_com_prysmaticlabs_prysm_v5_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[0:8]))
// Field (1) 'Count'
b.Count = ssz.UnmarshallUint64(buf[8:16])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the BlobSidecarsByRangeRequest object
func (b *BlobSidecarsByRangeRequest) SizeSSZ() (size int) {
size = 16
return
}
// HashTreeRoot ssz hashes the BlobSidecarsByRangeRequest object
func (b *BlobSidecarsByRangeRequest) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(b)
}
// HashTreeRootWith ssz hashes the BlobSidecarsByRangeRequest object with a hasher
func (b *BlobSidecarsByRangeRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'StartSlot'
hh.PutUint64(uint64(b.StartSlot))
// Field (1) 'Count'
hh.PutUint64(b.Count)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the DepositSnapshot object
func (d *DepositSnapshot) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(d)
}
// MarshalSSZTo ssz marshals the DepositSnapshot object to a target array
func (d *DepositSnapshot) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(84)
// Offset (0) 'Finalized'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.Finalized) * 32
// Field (1) 'DepositRoot'
if size := len(d.DepositRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.DepositRoot", size, 32)
return
}
dst = append(dst, d.DepositRoot...)
// Field (2) 'DepositCount'
dst = ssz.MarshalUint64(dst, d.DepositCount)
// Field (3) 'ExecutionHash'
if size := len(d.ExecutionHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.ExecutionHash", size, 32)
return
}
dst = append(dst, d.ExecutionHash...)
// Field (4) 'ExecutionDepth'
dst = ssz.MarshalUint64(dst, d.ExecutionDepth)
// Field (0) 'Finalized'
if size := len(d.Finalized); size > 32 {
err = ssz.ErrListTooBigFn("--.Finalized", size, 32)
return
}
for ii := 0; ii < len(d.Finalized); ii++ {
if size := len(d.Finalized[ii]); size != 32 {
err = ssz.ErrBytesLengthFn("--.Finalized[ii]", size, 32)
return
}
dst = append(dst, d.Finalized[ii]...)
}
return
}
// UnmarshalSSZ ssz unmarshals the DepositSnapshot object
func (d *DepositSnapshot) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 84 {
return ssz.ErrSize
}
tail := buf
var o0 uint64
// Offset (0) 'Finalized'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 84 {
return ssz.ErrInvalidVariableOffset
}
// Field (1) 'DepositRoot'
if cap(d.DepositRoot) == 0 {
d.DepositRoot = make([]byte, 0, len(buf[4:36]))
}
d.DepositRoot = append(d.DepositRoot, buf[4:36]...)
// Field (2) 'DepositCount'
d.DepositCount = ssz.UnmarshallUint64(buf[36:44])
// Field (3) 'ExecutionHash'
if cap(d.ExecutionHash) == 0 {
d.ExecutionHash = make([]byte, 0, len(buf[44:76]))
}
d.ExecutionHash = append(d.ExecutionHash, buf[44:76]...)
// Field (4) 'ExecutionDepth'
d.ExecutionDepth = ssz.UnmarshallUint64(buf[76:84])
// Field (0) 'Finalized'
{
buf = tail[o0:]
num, err := ssz.DivideInt2(len(buf), 32, 32)
if err != nil {
return err
}
d.Finalized = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(d.Finalized[ii]) == 0 {
d.Finalized[ii] = make([]byte, 0, len(buf[ii*32:(ii+1)*32]))
}
d.Finalized[ii] = append(d.Finalized[ii], buf[ii*32:(ii+1)*32]...)
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the DepositSnapshot object
func (d *DepositSnapshot) SizeSSZ() (size int) {
size = 84
// Field (0) 'Finalized'
size += len(d.Finalized) * 32
return
}
// HashTreeRoot ssz hashes the DepositSnapshot object
func (d *DepositSnapshot) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(d)
}
// HashTreeRootWith ssz hashes the DepositSnapshot object with a hasher
func (d *DepositSnapshot) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Finalized'
{
if size := len(d.Finalized); size > 32 {
err = ssz.ErrListTooBigFn("--.Finalized", size, 32)
return
}
subIndx := hh.Index()
for _, i := range d.Finalized {
if len(i) != 32 {
err = ssz.ErrBytesLength
return
}
hh.Append(i)
}
numItems := uint64(len(d.Finalized))
hh.MerkleizeWithMixin(subIndx, numItems, 32)
}
// Field (1) 'DepositRoot'
if size := len(d.DepositRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.DepositRoot", size, 32)
return
}
hh.PutBytes(d.DepositRoot)
// Field (2) 'DepositCount'
hh.PutUint64(d.DepositCount)
// Field (3) 'ExecutionHash'
if size := len(d.ExecutionHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.ExecutionHash", size, 32)
return
}
hh.PutBytes(d.ExecutionHash)
// Field (4) 'ExecutionDepth'
hh.PutUint64(d.ExecutionDepth)
hh.Merkleize(indx)
return
}
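The regenerated non-core file keeps the usual fastssz surface: fixed-size types marshal to a known length (84 bytes for ValidatorRegistrationV1), unmarshal back, and hash to a 32-byte root. A minimal round-trip sketch using only the methods shown above (field values are placeholders):

package main

import (
    "bytes"
    "fmt"
    "log"

    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func main() {
    reg := &ethpb.ValidatorRegistrationV1{
        FeeRecipient: make([]byte, 20), // 20-byte execution address
        GasLimit:     30_000_000,
        Timestamp:    1_700_000_000,
        Pubkey:       make([]byte, 48), // 48-byte BLS public key
    }

    enc, err := reg.MarshalSSZ()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(len(enc)) // 84, the fixed SizeSSZ above

    dec := &ethpb.ValidatorRegistrationV1{}
    if err := dec.UnmarshalSSZ(enc); err != nil {
        log.Fatal(err)
    }
    fmt.Println(bytes.Equal(dec.Pubkey, reg.Pubkey)) // true

    root, err := reg.HashTreeRoot()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%x\n", root[:8])
}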

File diff suppressed because it is too large

@@ -60,6 +60,9 @@ def _ssz_go_proto_library_impl(ctx):
if len(ctx.attr.objs) > 0:
args.append("--objs=%s" % ",".join(ctx.attr.objs))
if len(ctx.attr.exclude_objs) > 0:
args.append("--exclude-objs=%s" % ",".join(ctx.attr.exclude_objs))
ctx.actions.run(
executable = ctx.executable.sszgen,
progress_message = "Generating ssz marshal and unmarshal functions",
@@ -79,9 +82,10 @@ ssz_gen_marshal = rule(
cfg = "exec",
),
"objs": attr.string_list(),
"exclude_objs": attr.string_list(),
"includes": attr.label_list(providers = [GoLibrary]),
"out": attr.output(),
},
outputs = {"out": "generated.ssz.go"},
)
SSZ_DEPS = ["@com_github_prysmaticlabs_fastssz//:go_default_library"]
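The new exclude_objs attribute above forwards an --exclude-objs list to sszgen so that each fork-specific target in the v1alpha1 BUILD file can skip types already generated for earlier forks. Conceptually this amounts to a set difference over type names; a tiny Go sketch of that idea (illustration only; the exact flag semantics inside sszgen may differ):

package main

import "fmt"

// filterObjs returns the names in objs that are not in exclude,
// mirroring how an objs/exclude-objs pair narrows what gets generated.
func filterObjs(objs, exclude []string) []string {
    skip := make(map[string]struct{}, len(exclude))
    for _, e := range exclude {
        skip[e] = struct{}{}
    }
    var out []string
    for _, o := range objs {
        if _, ok := skip[o]; !ok {
            out = append(out, o)
        }
    }
    return out
}

func main() {
    altair := []string{"SyncAggregate", "SyncCommittee", "Checkpoint"}
    phase0 := []string{"Checkpoint", "Attestation"}
    fmt.Println(filterObjs(altair, phase0)) // [SyncAggregate SyncCommittee]
}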


@@ -43,13 +43,14 @@ func selectAccounts(selectionPrompt string, pubKeys [][fieldparams.BLSPubkeyLeng
results := make([]int, 0)
au := aurora.NewAurora(true)
if len(pubKeyStrings) > 5 {
log.Warnf("there are more than %d potential public keys to exit, please consider using the --%s or --%s flags", 5, flags.VoluntaryExitPublicKeysFlag.Name, flags.ExitAllFlag.Name)
log.Warnf("There are more than %d potential public keys to exit, please consider using the --%s or --%s flags", 5, flags.VoluntaryExitPublicKeysFlag.Name, flags.ExitAllFlag.Name)
}
log.Infof("Found a total of %d keys", len(pubKeyStrings))
for result != exit {
p := promptui.Select{
Label: selectionPrompt,
HideSelected: true,
Size: len(pubKeyStrings),
Size: 10, // Display 10 items at a time.
Items: append([]string{exit, allAccountsText}, pubKeyStrings...),
Templates: templates,
}


@@ -138,6 +138,9 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot primitives
// Signs input slot with domain selection proof. This is used to create the signature for aggregator selection.
func (v *validator) signSlotWithSelectionProof(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, slot primitives.Slot) (signature []byte, err error) {
ctx, span := trace.StartSpan(ctx, "validator.signSlotWithSelectionProof")
defer span.End()
domain, err := v.domainData(ctx, slots.ToEpoch(slot), params.BeaconConfig().DomainSelectionProof[:])
if err != nil {
return nil, err
@@ -194,6 +197,9 @@ func (v *validator) waitToSlotTwoThirds(ctx context.Context, slot primitives.Slo
// This returns the signature of validator signing over aggregate and
// proof object.
func (v *validator) aggregateAndProofSig(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, agg *ethpb.AggregateAttestationAndProof, slot primitives.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "validator.aggregateAndProofSig")
defer span.End()
d, err := v.domainData(ctx, slots.ToEpoch(agg.Aggregate.Data.Slot), params.BeaconConfig().DomainAggregateAndProof[:])
if err != nil {
return nil, err


@@ -199,6 +199,9 @@ func (v *validator) duty(pubKey [fieldparams.BLSPubkeyLength]byte) (*ethpb.Dutie
// Given validator's public key, this function returns the signature of an attestation data and its signing root.
func (v *validator) signAtt(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, data *ethpb.AttestationData, slot primitives.Slot) ([]byte, [32]byte, error) {
ctx, span := trace.StartSpan(ctx, "validator.signAtt")
defer span.End()
domain, root, err := v.domainAndSigningRoot(ctx, data)
if err != nil {
return nil, [32]byte{}, err
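The validator-client hunks above add the same tracing pattern to each newly instrumented function: start a named child span from the incoming context, defer its End, and keep using the returned context for downstream calls. A minimal sketch of that pattern, assuming an OpenCensus-style trace API; the Prysm files may import a local wrapper with the same StartSpan/End shape, and the function here is a placeholder:

package main

import (
    "context"

    "go.opencensus.io/trace"
)

// signSomething shows the span pattern added in the hunks above:
// start a named child span, always end it, and pass the derived
// context to anything called inside the function.
func signSomething(ctx context.Context) error {
    ctx, span := trace.StartSpan(ctx, "validator.signSomething")
    defer span.End()

    return doWork(ctx) // downstream calls inherit the span via ctx
}

func doWork(ctx context.Context) error {
    _ = ctx
    return nil
}

func main() {
    _ = signSomething(context.Background())
}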

Some files were not shown because too many files have changed in this diff.