Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)

Compare commits: use-method...Stale_PR_c
6 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | f265eb7a84 |  |
|  | 6562036c54 |  |
|  | b8aad84285 |  |
|  | 5f0d6074d6 |  |
|  | 9d6a2f5390 |  |
|  | 490ddbf782 |  |
.github/workflows/stale_pr_checker.yml — new file (vendored), 27 lines

@@ -0,0 +1,27 @@
name: Find stale PRs

on:
  schedule:
    - cron: '0 13 * * 1'

jobs:
  fetch-PRs:
    runs-on: ubuntu-latest
    steps:
      - name: Fetch pull requests from here
        id: local
        uses: paritytech/stale-pr-finder@main
        with:
          GITHUB_TOKEN: ${{ github.token }}
          repo: prysm
      - name: Post to a Slack channel
        id: slack
        uses: slackapi/slack-github-action@v1.23.0
        with:
          channel-id: ${{ secrets.CHANNEL }}
          slack-message: |
            Stale PRs this week:
            ${{ steps.local.outputs.message }}
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
          LOCAL_PR: ${{ steps.local.outputs.message }}"

BUILD.bazel — 13 lines changed

@@ -1,4 +1,3 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@bazel_gazelle//:def.bzl", "gazelle")
load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
@@ -225,6 +224,7 @@ nogo(
"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library",
"@org_golang_x_tools//go/analysis/passes/defers:go_default_library",
"@org_golang_x_tools//go/analysis/passes/directive:go_default_library",
"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library",
# fieldalignment disabled
#"@org_golang_x_tools//go/analysis/passes/fieldalignment:go_default_library",
"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library",
@@ -287,14 +287,3 @@ sh_binary(
srcs = ["prysm.sh"],
visibility = ["//visibility:public"],
)

go_library(
name = "go_default_library",
srcs = ["derp.go"],
importpath = "github.com/prysmaticlabs/prysm/v5",
visibility = ["//visibility:public"],
deps = [
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

@@ -40,7 +40,7 @@ import (
//
// state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:]
func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "electra.ProcessPendingConsolidations")
_, span := trace.StartSpan(ctx, "electra.ProcessPendingConsolidations")
defer span.End()

if st == nil || st.IsNil() {
@@ -68,7 +68,7 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
break
}

if err := SwitchToCompoundingValidator(ctx, st, pc.TargetIndex); err != nil {
if err := SwitchToCompoundingValidator(st, pc.TargetIndex); err != nil {
return err
}


@@ -295,14 +295,14 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
}

for _, index := range preActivationIndices {
if err := helpers.QueueEntireBalanceAndResetValidator(post, index); err != nil {
if err := QueueEntireBalanceAndResetValidator(post, index); err != nil {
return nil, errors.Wrap(err, "failed to queue entire balance and reset validator")
}
}

// Ensure early adopters of compounding credentials go through the activation churn
for _, index := range compoundWithdrawalIndices {
if err := helpers.QueueExcessActiveBalance(post, index); err != nil {
if err := QueueExcessActiveBalance(post, index); err != nil {
return nil, errors.Wrap(err, "failed to queue excess active balance")
}
}

@@ -1,7 +1,6 @@
package electra

import (
"context"
"errors"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
@@ -19,7 +18,7 @@ import (
// if has_eth1_withdrawal_credential(validator):
// validator.withdrawal_credentials = COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
// queue_excess_active_balance(state, index)
func SwitchToCompoundingValidator(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
func SwitchToCompoundingValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
v, err := s.ValidatorAtIndex(idx)
if err != nil {
return err
@@ -32,12 +31,12 @@ func SwitchToCompoundingValidator(ctx context.Context, s state.BeaconState, idx
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
return err
}
return queueExcessActiveBalance(ctx, s, idx)
return QueueExcessActiveBalance(s, idx)
}
return nil
}

// queueExcessActiveBalance
// QueueExcessActiveBalance
//
// Spec definition:
//
@@ -49,7 +48,7 @@ func SwitchToCompoundingValidator(ctx context.Context, s state.BeaconState, idx
// state.pending_balance_deposits.append(
// PendingBalanceDeposit(index=index, amount=excess_balance)
// )
func queueExcessActiveBalance(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := s.BalanceAtIndex(idx)
if err != nil {
return err
@@ -80,7 +79,7 @@ func queueExcessActiveBalance(ctx context.Context, s state.BeaconState, idx prim
// )
//
//nolint:dupword
func QueueEntireBalanceAndResetValidator(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
func QueueEntireBalanceAndResetValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := s.BalanceAtIndex(idx)
if err != nil {
return err

@@ -2,7 +2,6 @@ package electra_test

import (
"bytes"
"context"
"testing"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
@@ -11,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestSwitchToCompoundingValidator(t *testing.T) {
@@ -34,10 +34,10 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
})
// Test that a validator with no withdrawal credentials cannot be switched to compounding.
require.NoError(t, err)
require.ErrorContains(t, "validator has no withdrawal credentials", electra.SwitchToCompoundingValidator(context.TODO(), s, 0))
require.ErrorContains(t, "validator has no withdrawal credentials", electra.SwitchToCompoundingValidator(s, 0))

// Test that a validator with withdrawal credentials can be switched to compounding.
require.NoError(t, electra.SwitchToCompoundingValidator(context.TODO(), s, 1))
require.NoError(t, electra.SwitchToCompoundingValidator(s, 1))
v, err := s.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, true, bytes.HasPrefix(v.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}), "withdrawal credentials were not updated")
@@ -50,7 +50,7 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
require.Equal(t, 0, len(pbd), "pending balance deposits should be empty")

// Test that a validator with excess balance can be switched to compounding, excess balance is queued.
require.NoError(t, electra.SwitchToCompoundingValidator(context.TODO(), s, 2))
require.NoError(t, electra.SwitchToCompoundingValidator(s, 2))
b, err = s.BalanceAtIndex(2)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was not changed")
@@ -74,7 +74,7 @@ func TestQueueEntireBalanceAndResetValidator(t *testing.T) {
},
})
require.NoError(t, err)
require.NoError(t, electra.QueueEntireBalanceAndResetValidator(context.TODO(), s, 0))
require.NoError(t, electra.QueueEntireBalanceAndResetValidator(s, 0))
b, err := s.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), b, "balance was not changed")
@@ -88,3 +88,57 @@ func TestQueueEntireBalanceAndResetValidator(t *testing.T) {
require.Equal(t, params.BeaconConfig().MinActivationBalance+100_000, pbd[0].Amount, "pending balance deposit amount is incorrect")
require.Equal(t, primitives.ValidatorIndex(0), pbd[0].Index, "pending balance deposit index is incorrect")
}

func TestSwitchToCompoundingValidator_Ok(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
vals := st.Validators()
vals[0].WithdrawalCredentials = []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte}
require.NoError(t, st.SetValidators(vals))
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance + 1010
require.NoError(t, st.SetBalances(bals))
require.NoError(t, electra.SwitchToCompoundingValidator(st, 0))

pbd, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, uint64(1010), pbd[0].Amount) // appends it at the end
val, err := st.ValidatorAtIndex(0)
require.NoError(t, err)

bytes.HasPrefix(val.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte})
}

func TestQueueExcessActiveBalance_Ok(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance + 1000
require.NoError(t, st.SetBalances(bals))

err := electra.QueueExcessActiveBalance(st, 0)
require.NoError(t, err)

pbd, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, uint64(1000), pbd[0].Amount) // appends it at the end

bals = st.Balances()
require.Equal(t, params.BeaconConfig().MinActivationBalance, bals[0])
}

func TestQueueEntireBalanceAndResetValidator_Ok(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
// need to manually set this to 0 as after 6110 these balances are now 0 and instead populates pending balance deposits
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance - 1000
require.NoError(t, st.SetBalances(bals))
err := electra.QueueEntireBalanceAndResetValidator(st, 0)
require.NoError(t, err)

pbd, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd))
require.Equal(t, params.BeaconConfig().MinActivationBalance-1000, pbd[0].Amount)
bal, err := st.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), bal)
}

@@ -674,68 +674,3 @@ func ValidatorMaxEffectiveBalance(val *ethpb.Validator) uint64 {
}
return params.BeaconConfig().MinActivationBalance
}

// QueueExcessActiveBalance queues validators with balances above the min activation balance and adds to pending balance deposit.
//
// Spec definition:
//
// def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None:
// balance = state.balances[index]
// if balance > MIN_ACTIVATION_BALANCE:
// excess_balance = balance - MIN_ACTIVATION_BALANCE
// state.balances[index] = MIN_ACTIVATION_BALANCE
// state.pending_balance_deposits.append(
// PendingBalanceDeposit(index=index, amount=excess_balance)
// )
func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := s.BalanceAtIndex(idx)
if err != nil {
return err
}

if bal > params.BeaconConfig().MinActivationBalance {
excessBalance := bal - params.BeaconConfig().MinActivationBalance
if err := s.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
return err
}
return s.AppendPendingBalanceDeposit(idx, excessBalance)
}
return nil
}

// QueueEntireBalanceAndResetValidator queues the entire balance and resets the validator. This is used in electra fork logic.
//
// Spec definition:
//
// def queue_entire_balance_and_reset_validator(state: BeaconState, index: ValidatorIndex) -> None:
// balance = state.balances[index]
// validator = state.validators[index]
// state.balances[index] = 0
// validator.effective_balance = 0
// validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH
// state.pending_balance_deposits.append(
// PendingBalanceDeposit(index=index, amount=balance)
// )
func QueueEntireBalanceAndResetValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := s.BalanceAtIndex(idx)
if err != nil {
return err
}

if err := s.UpdateBalancesAtIndex(idx, 0); err != nil {
return err
}

v, err := s.ValidatorAtIndex(idx)
if err != nil {
return err
}

v.EffectiveBalance = 0
v.ActivationEligibilityEpoch = params.BeaconConfig().FarFutureEpoch
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
return err
}

return s.AppendPendingBalanceDeposit(idx, bal)
}

@@ -18,7 +18,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestIsActiveValidator_OK(t *testing.T) {
@@ -1120,40 +1119,3 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
// Sanity check that MinActivationBalance equals (pre-electra) MaxEffectiveBalance
assert.Equal(t, params.BeaconConfig().MinActivationBalance, params.BeaconConfig().MaxEffectiveBalance)
}

func TestQueueExcessActiveBalance_Ok(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance + 1000
require.NoError(t, st.SetBalances(bals))

err := helpers.QueueExcessActiveBalance(st, 0)
require.NoError(t, err)

pbd, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, uint64(1000), pbd[0].Amount)

bals = st.Balances()
require.Equal(t, params.BeaconConfig().MinActivationBalance, bals[0])
}

func TestQueueEntireBalanceAndResetValidator_Ok(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
val, err := st.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, val.EffectiveBalance)
pbd, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(pbd))
err = helpers.QueueEntireBalanceAndResetValidator(st, 0)
require.NoError(t, err)

pbd, err = st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd))

val, err = st.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), val.EffectiveBalance)
}

@@ -21,12 +21,13 @@ go_library(
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",

@@ -16,9 +16,10 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",
"//runtime/version:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
@@ -39,14 +40,15 @@ go_test(
embed = [":go_default_library"],
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

@@ -9,7 +9,9 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
attaggregation "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation/aggregation/attestations"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -32,28 +34,28 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
_, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAtts")
defer span.End()

attsByDataRoot := make(map[[32]byte][]ethpb.Att, len(unaggregatedAtts))
attsByVerAndDataRoot := make(map[attestation.Id][]ethpb.Att, len(unaggregatedAtts))
for _, att := range unaggregatedAtts {
attDataRoot, err := att.GetData().HashTreeRoot()
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return err
return errors.Wrap(err, "could not create attestation ID")
}
attsByDataRoot[attDataRoot] = append(attsByDataRoot[attDataRoot], att)
attsByVerAndDataRoot[id] = append(attsByVerAndDataRoot[id], att)
}

// Aggregate unaggregated attestations from the pool and save them in the pool.
// Track the unaggregated attestations that aren't able to aggregate.
leftOverUnaggregatedAtt := make(map[[32]byte]bool)
leftOverUnaggregatedAtt := make(map[attestation.Id]bool)

leftOverUnaggregatedAtt = c.aggregateParallel(attsByDataRoot, leftOverUnaggregatedAtt)
leftOverUnaggregatedAtt = c.aggregateParallel(attsByVerAndDataRoot, leftOverUnaggregatedAtt)

// Remove the unaggregated attestations from the pool that were successfully aggregated.
for _, att := range unaggregatedAtts {
h, err := hashFn(att)
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return err
return errors.Wrap(err, "could not create attestation ID")
}
if leftOverUnaggregatedAtt[h] {
if leftOverUnaggregatedAtt[id] {
continue
}
if err := c.DeleteUnaggregatedAttestation(att); err != nil {
@@ -66,7 +68,7 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
// aggregateParallel aggregates attestations in parallel for `atts` and saves them in the pool,
// returns the unaggregated attestations that weren't able to aggregate.
// Given `n` CPU cores, it creates a channel of size `n` and spawns `n` goroutines to aggregate attestations
func (c *AttCaches) aggregateParallel(atts map[[32]byte][]ethpb.Att, leftOver map[[32]byte]bool) map[[32]byte]bool {
func (c *AttCaches) aggregateParallel(atts map[attestation.Id][]ethpb.Att, leftOver map[attestation.Id]bool) map[attestation.Id]bool {
var leftoverLock sync.Mutex
wg := sync.WaitGroup{}

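The doc comment above describes a fan-out: with `n` CPU cores, a buffered channel of size `n` feeds `n` goroutines, each aggregating one group of attestations at a time, while a mutex-guarded map records the groups that could not be aggregated. A minimal, self-contained Go sketch of that pattern is shown below; the `job` type, the string keys, and the "a single attestation is left over" rule are illustrative placeholders, not Prysm's actual implementation.

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// aggregateParallelSketch fans groups out to one worker per CPU core and
// returns the IDs of groups that could not be aggregated.
func aggregateParallelSketch(groups map[string][]int) map[string]bool {
	n := runtime.GOMAXPROCS(0) // one worker per available CPU core
	type job struct {
		id   string
		atts []int
	}
	ch := make(chan job, n) // buffered channel of size n feeding the workers
	leftOver := make(map[string]bool)
	var mu sync.Mutex // guards leftOver, like leftoverLock in the pool code
	var wg sync.WaitGroup

	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			defer wg.Done()
			for j := range ch {
				// Stand-in for the real aggregation step: a group with a
				// single attestation cannot be aggregated, so record it.
				if len(j.atts) == 1 {
					mu.Lock()
					leftOver[j.id] = true
					mu.Unlock()
				}
			}
		}()
	}
	for id, atts := range groups {
		ch <- job{id: id, atts: atts}
	}
	close(ch)
	wg.Wait()
	return leftOver
}

func main() {
	fmt.Println(aggregateParallelSketch(map[string][]int{"a": {1, 2}, "b": {3}}))
}
```

Closing the channel after all groups are queued lets every worker's `range` loop terminate naturally, and `wg.Wait()` guarantees the left-over map is complete before it is returned.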
@@ -92,13 +94,13 @@ func (c *AttCaches) aggregateParallel(atts map[[32]byte][]ethpb.Att, leftOver ma
continue
}
} else {
h, err := hashFn(aggregated)
id, err := attestation.NewId(aggregated, attestation.Full)
if err != nil {
log.WithError(err).Error("could not hash attestation")
log.WithError(err).Error("Could not create attestation ID")
continue
}
leftoverLock.Lock()
leftOver[h] = true
leftOver[id] = true
leftoverLock.Unlock()
}
}
@@ -139,17 +141,18 @@ func (c *AttCaches) SaveAggregatedAttestation(att ethpb.Att) error {
return nil
}

r, err := hashFn(att.GetData())
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}
copiedAtt := att.Copy()

c.aggregatedAttLock.Lock()
defer c.aggregatedAttLock.Unlock()
atts, ok := c.aggregatedAtt[r]
atts, ok := c.aggregatedAtt[id]
if !ok {
atts := []ethpb.Att{copiedAtt}
c.aggregatedAtt[r] = atts
c.aggregatedAtt[id] = atts
return nil
}

@@ -157,7 +160,7 @@ func (c *AttCaches) SaveAggregatedAttestation(att ethpb.Att) error {
if err != nil {
return err
}
c.aggregatedAtt[r] = atts
c.aggregatedAtt[id] = atts

return nil
}
@@ -191,17 +194,56 @@ func (c *AttCaches) AggregatedAttestations() []ethpb.Att {

// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []ethpb.Att {
func (c *AttCaches) AggregatedAttestationsBySlotIndex(
ctx context.Context,
slot primitives.Slot,
committeeIndex primitives.CommitteeIndex,
) []*ethpb.Attestation {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
defer span.End()

atts := make([]ethpb.Att, 0)
atts := make([]*ethpb.Attestation, 0)

c.aggregatedAttLock.RLock()
defer c.aggregatedAttLock.RUnlock()
for _, a := range c.aggregatedAtt {
if slot == a[0].GetData().Slot && committeeIndex == a[0].GetData().CommitteeIndex {
atts = append(atts, a...)
for _, as := range c.aggregatedAtt {
if as[0].Version() == version.Phase0 && slot == as[0].GetData().Slot && committeeIndex == as[0].GetData().CommitteeIndex {
for _, a := range as {
att, ok := a.(*ethpb.Attestation)
// This will never fail in practice because we asserted the version
if ok {
atts = append(atts, att)
}
}
}
}

return atts
}

// AggregatedAttestationsBySlotIndexElectra returns the aggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) AggregatedAttestationsBySlotIndexElectra(
ctx context.Context,
slot primitives.Slot,
committeeIndex primitives.CommitteeIndex,
) []*ethpb.AttestationElectra {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndexElectra")
defer span.End()

atts := make([]*ethpb.AttestationElectra, 0)

c.aggregatedAttLock.RLock()
defer c.aggregatedAttLock.RUnlock()
for _, as := range c.aggregatedAtt {
if as[0].Version() == version.Electra && slot == as[0].GetData().Slot && as[0].CommitteeBitsVal().BitAt(uint64(committeeIndex)) {
for _, a := range as {
att, ok := a.(*ethpb.AttestationElectra)
// This will never fail in practice because we asserted the version
if ok {
atts = append(atts, att)
}
}
}
}

@@ -216,18 +258,19 @@ func (c *AttCaches) DeleteAggregatedAttestation(att ethpb.Att) error {
if !helpers.IsAggregated(att) {
return errors.New("attestation is not aggregated")
}
r, err := hashFn(att.GetData())
if err != nil {
return errors.Wrap(err, "could not tree hash attestation data")
}

if err := c.insertSeenBit(att); err != nil {
return err
}

id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrap(err, "could not create attestation ID")
}

c.aggregatedAttLock.Lock()
defer c.aggregatedAttLock.Unlock()
attList, ok := c.aggregatedAtt[r]
attList, ok := c.aggregatedAtt[id]
if !ok {
return nil
}
@@ -241,9 +284,9 @@ func (c *AttCaches) DeleteAggregatedAttestation(att ethpb.Att) error {
}
}
if len(filtered) == 0 {
delete(c.aggregatedAtt, r)
delete(c.aggregatedAtt, id)
} else {
c.aggregatedAtt[r] = filtered
c.aggregatedAtt[id] = filtered
}

return nil
@@ -254,14 +297,15 @@ func (c *AttCaches) HasAggregatedAttestation(att ethpb.Att) (bool, error) {
if err := helpers.ValidateNilAttestation(att); err != nil {
return false, err
}
r, err := hashFn(att.GetData())

id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return false, errors.Wrap(err, "could not tree hash attestation")
return false, errors.Wrap(err, "could not create attestation ID")
}

c.aggregatedAttLock.RLock()
defer c.aggregatedAttLock.RUnlock()
if atts, ok := c.aggregatedAtt[r]; ok {
if atts, ok := c.aggregatedAtt[id]; ok {
for _, a := range atts {
if c, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
return false, err
@@ -273,7 +317,7 @@ func (c *AttCaches) HasAggregatedAttestation(att ethpb.Att) (bool, error) {

c.blockAttLock.RLock()
defer c.blockAttLock.RUnlock()
if atts, ok := c.blockAtt[r]; ok {
if atts, ok := c.blockAtt[id]; ok {
for _, a := range atts {
if c, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
return false, err

@@ -7,10 +7,11 @@ import (

c "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
fssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
@@ -69,7 +70,7 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
}),
AggregationBits: bitfield.Bitlist{0b10111},
},
wantErrString: "could not tree hash attestation: --.BeaconBlockRoot (" + fssz.ErrBytesLength.Error() + ")",
wantErrString: "could not create attestation ID",
},
{
name: "already seen",
@@ -92,15 +93,13 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
count: 1,
},
}
r, err := hashFn(util.HydrateAttestationData(&ethpb.AttestationData{
Slot: 100,
}))
id, err := attestation.NewId(util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100}}), attestation.Data)
require.NoError(t, err)

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cache := NewAttCaches()
cache.seenAtt.Set(string(r[:]), []bitfield.Bitlist{{0xff}}, c.DefaultExpiration)
cache.seenAtt.Set(id.String(), []bitfield.Bitlist{{0xff}}, c.DefaultExpiration)
assert.Equal(t, 0, len(cache.unAggregatedAtt), "Invalid start pool, atts: %d", len(cache.unAggregatedAtt))

err := cache.SaveAggregatedAttestation(tt.att)
@@ -230,7 +229,7 @@ func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
},
}
err := cache.DeleteAggregatedAttestation(att)
wantErr := "could not tree hash attestation data: --.BeaconBlockRoot (" + fssz.ErrBytesLength.Error() + ")"
wantErr := "could not create attestation ID"
assert.ErrorContains(t, wantErr, err)
})

@@ -500,3 +499,49 @@ func TestKV_Aggregated_DuplicateAggregatedAttestations(t *testing.T) {
assert.DeepSSZEqual(t, att2, returned[0], "Did not receive correct aggregated atts")
assert.Equal(t, 1, len(returned), "Did not receive correct aggregated atts")
}

func TestKV_Aggregated_AggregatedAttestationsBySlotIndex(t *testing.T) {
cache := NewAttCaches()

att1 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1011}})
att2 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
att3 := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
atts := []*ethpb.Attestation{att1, att2, att3}

for _, att := range atts {
require.NoError(t, cache.SaveAggregatedAttestation(att))
}
ctx := context.Background()
returned := cache.AggregatedAttestationsBySlotIndex(ctx, 1, 1)
assert.DeepEqual(t, []*ethpb.Attestation{att1}, returned)
returned = cache.AggregatedAttestationsBySlotIndex(ctx, 1, 2)
assert.DeepEqual(t, []*ethpb.Attestation{att2}, returned)
returned = cache.AggregatedAttestationsBySlotIndex(ctx, 2, 1)
assert.DeepEqual(t, []*ethpb.Attestation{att3}, returned)
}

func TestKV_Aggregated_AggregatedAttestationsBySlotIndexElectra(t *testing.T) {
cache := NewAttCaches()

committeeBits := primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(1, true)
att1 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1011}, CommitteeBits: committeeBits})
committeeBits = primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(2, true)
att2 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}, CommitteeBits: committeeBits})
committeeBits = primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(1, true)
att3 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}, CommitteeBits: committeeBits})
atts := []*ethpb.AttestationElectra{att1, att2, att3}

for _, att := range atts {
require.NoError(t, cache.SaveAggregatedAttestation(att))
}
ctx := context.Background()
returned := cache.AggregatedAttestationsBySlotIndexElectra(ctx, 1, 1)
assert.DeepEqual(t, []*ethpb.AttestationElectra{att1}, returned)
returned = cache.AggregatedAttestationsBySlotIndexElectra(ctx, 1, 2)
assert.DeepEqual(t, []*ethpb.AttestationElectra{att2}, returned)
returned = cache.AggregatedAttestationsBySlotIndexElectra(ctx, 2, 1)
assert.DeepEqual(t, []*ethpb.AttestationElectra{att3}, returned)
}

@@ -3,6 +3,7 @@ package kv
import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)

// SaveBlockAttestation saves an block attestation in cache.
@@ -10,14 +11,15 @@ func (c *AttCaches) SaveBlockAttestation(att ethpb.Att) error {
if att == nil {
return nil
}
r, err := hashFn(att.GetData())

id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}

c.blockAttLock.Lock()
defer c.blockAttLock.Unlock()
atts, ok := c.blockAtt[r]
atts, ok := c.blockAtt[id]
if !ok {
atts = make([]ethpb.Att, 0, 1)
}
@@ -31,7 +33,7 @@ func (c *AttCaches) SaveBlockAttestation(att ethpb.Att) error {
}
}

c.blockAtt[r] = append(atts, att.Copy())
c.blockAtt[id] = append(atts, att.Copy())

return nil
}
@@ -54,14 +56,15 @@ func (c *AttCaches) DeleteBlockAttestation(att ethpb.Att) error {
if att == nil {
return nil
}
r, err := hashFn(att.GetData())

id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}

c.blockAttLock.Lock()
defer c.blockAttLock.Unlock()
delete(c.blockAtt, r)
delete(c.blockAtt, id)

return nil
}

@@ -3,6 +3,7 @@ package kv
import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)

// SaveForkchoiceAttestation saves an forkchoice attestation in cache.
@@ -10,14 +11,15 @@ func (c *AttCaches) SaveForkchoiceAttestation(att ethpb.Att) error {
if att == nil {
return nil
}
r, err := hashFn(att)

id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}

c.forkchoiceAttLock.Lock()
defer c.forkchoiceAttLock.Unlock()
c.forkchoiceAtt[r] = att.Copy()
c.forkchoiceAtt[id] = att

return nil
}
@@ -51,14 +53,15 @@ func (c *AttCaches) DeleteForkchoiceAttestation(att ethpb.Att) error {
if att == nil {
return nil
}
r, err := hashFn(att)

id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}

c.forkchoiceAttLock.Lock()
defer c.forkchoiceAttLock.Unlock()
delete(c.forkchoiceAtt, r)
delete(c.forkchoiceAtt, id)

return nil
}

@@ -9,24 +9,22 @@ import (

"github.com/patrickmn/go-cache"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)

var hashFn = hash.Proto

// AttCaches defines the caches used to satisfy attestation pool interface.
// These caches are KV store for various attestations
// such are unaggregated, aggregated or attestations within a block.
type AttCaches struct {
aggregatedAttLock sync.RWMutex
aggregatedAtt map[[32]byte][]ethpb.Att
aggregatedAtt map[attestation.Id][]ethpb.Att
unAggregateAttLock sync.RWMutex
unAggregatedAtt map[[32]byte]ethpb.Att
unAggregatedAtt map[attestation.Id]ethpb.Att
forkchoiceAttLock sync.RWMutex
forkchoiceAtt map[[32]byte]ethpb.Att
forkchoiceAtt map[attestation.Id]ethpb.Att
blockAttLock sync.RWMutex
blockAtt map[[32]byte][]ethpb.Att
blockAtt map[attestation.Id][]ethpb.Att
seenAtt *cache.Cache
}

@@ -36,10 +34,10 @@ func NewAttCaches() *AttCaches {
secsInEpoch := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
c := cache.New(secsInEpoch*time.Second, 2*secsInEpoch*time.Second)
pool := &AttCaches{
unAggregatedAtt: make(map[[32]byte]ethpb.Att),
aggregatedAtt: make(map[[32]byte][]ethpb.Att),
forkchoiceAtt: make(map[[32]byte]ethpb.Att),
blockAtt: make(map[[32]byte][]ethpb.Att),
unAggregatedAtt: make(map[attestation.Id]ethpb.Att),
aggregatedAtt: make(map[attestation.Id][]ethpb.Att),
forkchoiceAtt: make(map[attestation.Id]ethpb.Att),
blockAtt: make(map[attestation.Id][]ethpb.Att),
seenAtt: c,
}

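The hunks above re-key every cache in `AttCaches` from a raw `[32]byte` data root to `attestation.Id`, which also carries the attestation's version. The sketch below illustrates why a composite key keeps Phase0 and Electra attestations with identical data roots apart; the `attID` and `aggCache` types are illustrative stand-ins, not Prysm's actual `attestation.Id`.

```go
package main

import (
	"fmt"
	"sync"
)

// attID is a composite map key: fork version plus data root.
type attID struct {
	version  int      // e.g. Phase0 vs Electra
	dataRoot [32]byte // hash tree root of the attestation data
}

// aggCache mirrors the shape of one of the pool's maps.
type aggCache struct {
	mu   sync.RWMutex
	aggs map[attID][]string
}

func (c *aggCache) save(id attID, att string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.aggs[id] = append(c.aggs[id], att)
}

func (c *aggCache) get(id attID) []string {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.aggs[id]
}

func main() {
	c := &aggCache{aggs: make(map[attID][]string)}
	phase0 := attID{version: 0, dataRoot: [32]byte{1}}
	electra := attID{version: 1, dataRoot: [32]byte{1}} // same root, different fork
	c.save(phase0, "att-a")
	c.save(electra, "att-b")
	fmt.Println(c.get(phase0), c.get(electra)) // kept apart despite equal roots
}
```

Because a struct of comparable fields is itself comparable in Go, map lookups and deletes work unchanged; only the key construction differs from the old hash-based keys.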
@@ -1,21 +1,20 @@
package kv

import (
"fmt"

"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)

func (c *AttCaches) insertSeenBit(att ethpb.Att) error {
r, err := hashFn(att.GetData())
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return err
return errors.Wrap(err, "could not create attestation ID")
}

v, ok := c.seenAtt.Get(string(r[:]))
v, ok := c.seenAtt.Get(id.String())
if ok {
seenBits, ok := v.([]bitfield.Bitlist)
if !ok {
@@ -24,7 +23,7 @@ func (c *AttCaches) insertSeenBit(att ethpb.Att) error {
alreadyExists := false
for _, bit := range seenBits {
if c, err := bit.Contains(att.GetAggregationBits()); err != nil {
return fmt.Errorf("failed to check seen bits on attestation when inserting bit: %w", err)
return err
} else if c {
alreadyExists = true
break
@@ -33,21 +32,21 @@ func (c *AttCaches) insertSeenBit(att ethpb.Att) error {
if !alreadyExists {
seenBits = append(seenBits, att.GetAggregationBits())
}
c.seenAtt.Set(string(r[:]), seenBits, cache.DefaultExpiration /* one epoch */)
c.seenAtt.Set(id.String(), seenBits, cache.DefaultExpiration /* one epoch */)
return nil
}

c.seenAtt.Set(string(r[:]), []bitfield.Bitlist{att.GetAggregationBits()}, cache.DefaultExpiration /* one epoch */)
c.seenAtt.Set(id.String(), []bitfield.Bitlist{att.GetAggregationBits()}, cache.DefaultExpiration /* one epoch */)
return nil
}

func (c *AttCaches) hasSeenBit(att ethpb.Att) (bool, error) {
r, err := hashFn(att.GetData())
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return false, err
return false, errors.Wrap(err, "could not create attestation ID")
}

v, ok := c.seenAtt.Get(string(r[:]))
v, ok := c.seenAtt.Get(id.String())
if ok {
seenBits, ok := v.([]bitfield.Bitlist)
if !ok {
@@ -55,7 +54,7 @@ func (c *AttCaches) hasSeenBit(att ethpb.Att) (bool, error) {
}
for _, bit := range seenBits {
if c, err := bit.Contains(att.GetAggregationBits()); err != nil {
return false, fmt.Errorf("failed to check seen bits on attestation when reading bit: %w", err)
return false, err
} else if c {
return true, nil
}

@@ -5,6 +5,7 @@ import (

"github.com/prysmaticlabs/go-bitfield"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
@@ -39,18 +40,18 @@ func TestAttCaches_hasSeenBit(t *testing.T) {
func TestAttCaches_insertSeenBitDuplicates(t *testing.T) {
c := NewAttCaches()
att1 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000011}})
r, err := hashFn(att1.Data)
id, err := attestation.NewId(att1, attestation.Data)
require.NoError(t, err)
require.NoError(t, c.insertSeenBit(att1))
require.Equal(t, 1, c.seenAtt.ItemCount())

_, expirationTime1, ok := c.seenAtt.GetWithExpiration(string(r[:]))
_, expirationTime1, ok := c.seenAtt.GetWithExpiration(id.String())
require.Equal(t, true, ok)

// Make sure that duplicates are not inserted, but expiration time gets updated.
require.NoError(t, c.insertSeenBit(att1))
require.Equal(t, 1, c.seenAtt.ItemCount())
_, expirationprysmTime, ok := c.seenAtt.GetWithExpiration(string(r[:]))
_, expirationprysmTime, ok := c.seenAtt.GetWithExpiration(id.String())
require.Equal(t, true, ok)
require.Equal(t, true, expirationprysmTime.After(expirationTime1), "Expiration time is not updated")
}

@@ -7,6 +7,8 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"go.opencensus.io/trace"
)

@@ -27,13 +29,14 @@ func (c *AttCaches) SaveUnaggregatedAttestation(att ethpb.Att) error {
return nil
}

r, err := hashFn(att)
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}

c.unAggregateAttLock.Lock()
defer c.unAggregateAttLock.Unlock()
c.unAggregatedAtt[r] = att.Copy()
c.unAggregatedAtt[id] = att

return nil
}
@@ -69,19 +72,56 @@ func (c *AttCaches) UnaggregatedAttestations() ([]ethpb.Att, error) {

// UnaggregatedAttestationsBySlotIndex returns the unaggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) UnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []ethpb.Att {
func (c *AttCaches) UnaggregatedAttestationsBySlotIndex(
ctx context.Context,
slot primitives.Slot,
committeeIndex primitives.CommitteeIndex,
) []*ethpb.Attestation {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.UnaggregatedAttestationsBySlotIndex")
defer span.End()

atts := make([]ethpb.Att, 0)
atts := make([]*ethpb.Attestation, 0)

c.unAggregateAttLock.RLock()
defer c.unAggregateAttLock.RUnlock()

unAggregatedAtts := c.unAggregatedAtt
for _, a := range unAggregatedAtts {
if slot == a.GetData().Slot && committeeIndex == a.GetData().CommitteeIndex {
atts = append(atts, a)
if a.Version() == version.Phase0 && slot == a.GetData().Slot && committeeIndex == a.GetData().CommitteeIndex {
att, ok := a.(*ethpb.Attestation)
// This will never fail in practice because we asserted the version
if ok {
atts = append(atts, att)
}
}
}

return atts
}

// UnaggregatedAttestationsBySlotIndexElectra returns the unaggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) UnaggregatedAttestationsBySlotIndexElectra(
ctx context.Context,
slot primitives.Slot,
committeeIndex primitives.CommitteeIndex,
) []*ethpb.AttestationElectra {
_, span := trace.StartSpan(ctx, "operations.attestations.kv.UnaggregatedAttestationsBySlotIndexElectra")
defer span.End()

atts := make([]*ethpb.AttestationElectra, 0)

c.unAggregateAttLock.RLock()
defer c.unAggregateAttLock.RUnlock()

unAggregatedAtts := c.unAggregatedAtt
for _, a := range unAggregatedAtts {
if a.Version() == version.Electra && slot == a.GetData().Slot && a.CommitteeBitsVal().BitAt(uint64(committeeIndex)) {
att, ok := a.(*ethpb.AttestationElectra)
// This will never fail in practice because we asserted the version
if ok {
atts = append(atts, att)
}
}
}

@@ -101,14 +141,14 @@ func (c *AttCaches) DeleteUnaggregatedAttestation(att ethpb.Att) error {
return err
}

r, err := hashFn(att)
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not tree hash attestation")
return errors.Wrap(err, "could not create attestation ID")
}

c.unAggregateAttLock.Lock()
defer c.unAggregateAttLock.Unlock()
delete(c.unAggregatedAtt, r)
delete(c.unAggregatedAtt, id)

return nil
}

@@ -7,10 +7,11 @@ import (
"testing"

c "github.com/patrickmn/go-cache"
fssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/go-bitfield"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
@@ -39,7 +40,7 @@ func TestKV_Unaggregated_SaveUnaggregatedAttestation(t *testing.T) {
BeaconBlockRoot: []byte{0b0},
},
},
wantErrString: fssz.ErrBytesLength.Error(),
wantErrString: "could not create attestation ID",
},
{
name: "normal save",
@@ -57,13 +58,13 @@ func TestKV_Unaggregated_SaveUnaggregatedAttestation(t *testing.T) {
count: 0,
},
}
r, err := hashFn(util.HydrateAttestationData(&ethpb.AttestationData{Slot: 100}))
id, err := attestation.NewId(util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100}}), attestation.Data)
require.NoError(t, err)

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cache := NewAttCaches()
cache.seenAtt.Set(string(r[:]), []bitfield.Bitlist{{0xff}}, c.DefaultExpiration)
cache.seenAtt.Set(id.String(), []bitfield.Bitlist{{0xff}}, c.DefaultExpiration)
assert.Equal(t, 0, len(cache.unAggregatedAtt), "Invalid start pool, atts: %d", len(cache.unAggregatedAtt))

if tt.att != nil && tt.att.GetSignature() == nil {
@@ -246,9 +247,35 @@ func TestKV_Unaggregated_UnaggregatedAttestationsBySlotIndex(t *testing.T) {
}
ctx := context.Background()
returned := cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 1)
assert.DeepEqual(t, []ethpb.Att{att1}, returned)
assert.DeepEqual(t, []*ethpb.Attestation{att1}, returned)
returned = cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 2)
assert.DeepEqual(t, []ethpb.Att{att2}, returned)
assert.DeepEqual(t, []*ethpb.Attestation{att2}, returned)
returned = cache.UnaggregatedAttestationsBySlotIndex(ctx, 2, 1)
assert.DeepEqual(t, []ethpb.Att{att3}, returned)
assert.DeepEqual(t, []*ethpb.Attestation{att3}, returned)
}

func TestKV_Unaggregated_UnaggregatedAttestationsBySlotIndexElectra(t *testing.T) {
cache := NewAttCaches()

committeeBits := primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(1, true)
att1 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b101}, CommitteeBits: committeeBits})
committeeBits = primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(2, true)
att2 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b110}, CommitteeBits: committeeBits})
committeeBits = primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(1, true)
att3 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110}, CommitteeBits: committeeBits})
atts := []*ethpb.AttestationElectra{att1, att2, att3}

for _, att := range atts {
require.NoError(t, cache.SaveUnaggregatedAttestation(att))
}
ctx := context.Background()
returned := cache.UnaggregatedAttestationsBySlotIndexElectra(ctx, 1, 1)
assert.DeepEqual(t, []*ethpb.AttestationElectra{att1}, returned)
returned = cache.UnaggregatedAttestationsBySlotIndexElectra(ctx, 1, 2)
assert.DeepEqual(t, []*ethpb.AttestationElectra{att2}, returned)
returned = cache.UnaggregatedAttestationsBySlotIndexElectra(ctx, 2, 1)
assert.DeepEqual(t, []*ethpb.AttestationElectra{att3}, returned)
}

@@ -18,7 +18,8 @@ type Pool interface {
SaveAggregatedAttestation(att ethpb.Att) error
SaveAggregatedAttestations(atts []ethpb.Att) error
AggregatedAttestations() []ethpb.Att
AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []ethpb.Att
AggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.Attestation
AggregatedAttestationsBySlotIndexElectra(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.AttestationElectra
DeleteAggregatedAttestation(att ethpb.Att) error
HasAggregatedAttestation(att ethpb.Att) (bool, error)
AggregatedAttestationCount() int
@@ -26,7 +27,8 @@ type Pool interface {
SaveUnaggregatedAttestation(att ethpb.Att) error
SaveUnaggregatedAttestations(atts []ethpb.Att) error
UnaggregatedAttestations() ([]ethpb.Att, error)
UnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []ethpb.Att
UnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.Attestation
UnaggregatedAttestationsBySlotIndexElectra(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.AttestationElectra
DeleteUnaggregatedAttestation(att ethpb.Att) error
DeleteSeenUnaggregatedAttestations() (int, error)
UnaggregatedAttestationCount() int

@@ -3,14 +3,14 @@ package attestations
import (
"bytes"
"context"
"errors"
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
attaggregation "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation/aggregation/attestations"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
@@ -67,7 +67,7 @@ func (s *Service) batchForkChoiceAtts(ctx context.Context) error {
atts := append(s.cfg.Pool.AggregatedAttestations(), s.cfg.Pool.BlockAttestations()...)
atts = append(atts, s.cfg.Pool.ForkchoiceAttestations()...)

attsByDataRoot := make(map[[32]byte][]ethpb.Att, len(atts))
attsByVerAndDataRoot := make(map[attestation.Id][]ethpb.Att, len(atts))

// Consolidate attestations by aggregating them by similar data root.
for _, att := range atts {
@@ -79,14 +79,14 @@ func (s *Service) batchForkChoiceAtts(ctx context.Context) error {
continue
}

attDataRoot, err := att.GetData().HashTreeRoot()
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return err
return errors.Wrap(err, "could not create attestation ID")
}
attsByDataRoot[attDataRoot] = append(attsByDataRoot[attDataRoot], att)
attsByVerAndDataRoot[id] = append(attsByVerAndDataRoot[id], att)
}

for _, atts := range attsByDataRoot {
for _, atts := range attsByVerAndDataRoot {
if err := s.aggregateAndSaveForkChoiceAtts(atts); err != nil {
return err
}
@@ -119,12 +119,12 @@ func (s *Service) aggregateAndSaveForkChoiceAtts(atts []ethpb.Att) error {
// This checks if the attestation has previously been aggregated for fork choice
// return true if yes, false if no.
func (s *Service) seen(att ethpb.Att) (bool, error) {
attRoot, err := hash.Proto(att.GetData())
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return false, err
return false, errors.Wrap(err, "could not create attestation ID")
}
incomingBits := att.GetAggregationBits()
savedBits, ok := s.forkChoiceProcessedRoots.Get(attRoot)
savedBits, ok := s.forkChoiceProcessedAtts.Get(id)
if ok {
savedBitlist, ok := savedBits.(bitfield.Bitlist)
if !ok {
@@ -149,6 +149,6 @@ func (s *Service) seen(att ethpb.Att) (bool, error) {
}
}

s.forkChoiceProcessedRoots.Add(attRoot, incomingBits)
s.forkChoiceProcessedAtts.Add(id, incomingBits)
return false, nil
}

@@ -13,16 +13,16 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
)

var forkChoiceProcessedRootsSize = 1 << 16
var forkChoiceProcessedAttsSize = 1 << 16

// Service of attestation pool operations.
type Service struct {
cfg *Config
ctx context.Context
cancel context.CancelFunc
err error
forkChoiceProcessedRoots *lru.Cache
genesisTime uint64
cfg *Config
ctx context.Context
cancel context.CancelFunc
err error
forkChoiceProcessedAtts *lru.Cache
genesisTime uint64
}

// Config options for the service.
@@ -35,7 +35,7 @@ type Config struct {
// NewService instantiates a new attestation pool service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
cache := lruwrpr.New(forkChoiceProcessedRootsSize)
cache := lruwrpr.New(forkChoiceProcessedAttsSize)

if cfg.pruneInterval == 0 {
// Prune expired attestations from the pool every slot interval.
@@ -44,10 +44,10 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {

ctx, cancel := context.WithCancel(ctx)
return &Service{
cfg: cfg,
ctx: ctx,
cancel: cancel,
forkChoiceProcessedRoots: cache,
cfg: cfg,
ctx: ctx,
cancel: cancel,
forkChoiceProcessedAtts: cache,
}, nil
}

@@ -2,7 +2,6 @@ package validator

import (
"context"
"fmt"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
@@ -100,12 +99,8 @@ func (vs *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.
best = aggregatedAtt
}
}
att, ok := best.(*ethpb.Attestation)
if !ok {
return nil, fmt.Errorf("best attestation has wrong type (expected %T, got %T)", &ethpb.Attestation{}, best)
}
a := &ethpb.AggregateAttestationAndProof{
Aggregate: att,
Aggregate: best,
SelectionProof: req.SlotSignature,
AggregatorIndex: validatorIndex,
}

@@ -9,7 +9,6 @@ go_library(
visibility = ["//visibility:private"],
deps = [
"//cmd/prysmctl/checkpointsync:go_default_library",
"//cmd/prysmctl/codegen:go_default_library",
"//cmd/prysmctl/db:go_default_library",
"//cmd/prysmctl/p2p:go_default_library",
"//cmd/prysmctl/testnet:go_default_library",

@@ -1,12 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["cmd.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/codegen",
visibility = ["//visibility:public"],
deps = [
"@com_github_offchainlabs_methodical_ssz//cmd/ssz/commands:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)

@@ -1,14 +0,0 @@
package codegen

import (
"github.com/OffchainLabs/methodical-ssz/cmd/ssz/commands"
"github.com/urfave/cli/v2"
)

var Commands = []*cli.Command{
{
Name: "ssz",
Usage: "ssz code generation utilities",
Subcommands: commands.All,
},
}

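The deleted file above registered methodical-ssz's subcommands through urfave/cli. A hedged sketch of the same wiring pattern in a standalone urfave/cli v2 app; the "generate" subcommand is a placeholder standing in for methodical-ssz's commands.All, not part of its actual API.

package main

import (
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	// An "ssz" command group wired the same way the removed codegen.Commands
	// slice was appended to prysmctlCommands.
	app := &cli.App{
		Name: "example",
		Commands: []*cli.Command{
			{
				Name:  "ssz",
				Usage: "ssz code generation utilities",
				Subcommands: []*cli.Command{
					{
						Name:  "generate", // placeholder standing in for commands.All
						Usage: "placeholder subcommand for illustration",
						Action: func(c *cli.Context) error {
							log.Println("code generation would run here")
							return nil
						},
					},
				},
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}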
@@ -4,7 +4,6 @@ import (
"os"

"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/checkpointsync"
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/codegen"
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/db"
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/p2p"
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/testnet"
@@ -33,5 +32,4 @@ func init() {
prysmctlCommands = append(prysmctlCommands, testnet.Commands...)
prysmctlCommands = append(prysmctlCommands, weaksubjectivity.Commands...)
prysmctlCommands = append(prysmctlCommands, validator.Commands...)
prysmctlCommands = append(prysmctlCommands, codegen.Commands...)
}

44
deps.bzl
44
deps.bzl
@@ -678,12 +678,6 @@ def prysm_deps():
|
||||
sum = "h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=",
|
||||
version = "v1.5.5",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_dave_jennifer",
|
||||
importpath = "github.com/dave/jennifer",
|
||||
sum = "h1:uRbSBH9UTS64yXbh4FrMHfgfY762RD+C7bUPKODpSJE=",
|
||||
version = "v1.7.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_davecgh_go_spew",
|
||||
importpath = "github.com/davecgh/go-spew",
|
||||
@@ -2592,12 +2586,6 @@ def prysm_deps():
|
||||
sum = "h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=",
|
||||
version = "v0.0.0-20170623195520-56545f4a5d46",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_offchainlabs_methodical_ssz",
|
||||
importpath = "github.com/OffchainLabs/methodical-ssz",
|
||||
sum = "h1:56W3xtZyeLcbKjt5staxK/jMdE6ql69rPhbb/XmZPhA=",
|
||||
version = "v0.0.0-20240712201410-cd5a2975775c",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_oklog_oklog",
|
||||
importpath = "github.com/oklog/oklog",
|
||||
@@ -4810,8 +4798,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "org_golang_x_crypto",
|
||||
importpath = "golang.org/x/crypto",
|
||||
sum = "h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=",
|
||||
version = "v0.25.0",
|
||||
sum = "h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=",
|
||||
version = "v0.23.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_exp",
|
||||
@@ -4846,14 +4834,14 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "org_golang_x_mod",
|
||||
importpath = "golang.org/x/mod",
|
||||
sum = "h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=",
|
||||
version = "v0.19.0",
|
||||
sum = "h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=",
|
||||
version = "v0.17.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_net",
|
||||
importpath = "golang.org/x/net",
|
||||
sum = "h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=",
|
||||
version = "v0.27.0",
|
||||
sum = "h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=",
|
||||
version = "v0.25.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_oauth2",
|
||||
@@ -4876,26 +4864,26 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "org_golang_x_sys",
|
||||
importpath = "golang.org/x/sys",
|
||||
sum = "h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=",
|
||||
version = "v0.22.0",
|
||||
sum = "h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=",
|
||||
version = "v0.20.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_telemetry",
|
||||
importpath = "golang.org/x/telemetry",
|
||||
sum = "h1:zf5N6UOrA487eEFacMePxjXAJctxKmyjKUsjA11Uzuk=",
|
||||
version = "v0.0.0-20240521205824-bda55230c457",
|
||||
sum = "h1:IRJeR9r1pYWsHKTRe/IInb7lYvbBVIqOgsX/u0mbOWY=",
|
||||
version = "v0.0.0-20240228155512-f48c80bd79b2",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_term",
|
||||
importpath = "golang.org/x/term",
|
||||
sum = "h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=",
|
||||
version = "v0.22.0",
|
||||
sum = "h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=",
|
||||
version = "v0.20.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_text",
|
||||
importpath = "golang.org/x/text",
|
||||
sum = "h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=",
|
||||
version = "v0.16.0",
|
||||
sum = "h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=",
|
||||
version = "v0.15.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_time",
|
||||
@@ -4906,8 +4894,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "org_golang_x_tools",
|
||||
importpath = "golang.org/x/tools",
|
||||
sum = "h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=",
|
||||
version = "v0.23.0",
|
||||
sum = "h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=",
|
||||
version = "v0.21.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_x_xerrors",
|
||||
|
||||
18
go.mod
18
go.mod
@@ -8,7 +8,6 @@ require (
contrib.go.opencensus.io/exporter/jaeger v0.2.1
github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240209103030-ec53fa766bf8
github.com/MariusVanDerWijden/tx-fuzz v1.3.3-0.20240227085032-f70dd7c85c97
github.com/OffchainLabs/methodical-ssz v0.0.0-20240712201410-cd5a2975775c
github.com/aristanetworks/goarista v0.0.0-20200805130819-fd197cf57d96
github.com/bazelbuild/rules_go v0.23.2
github.com/btcsuite/btcd/btcec/v2 v2.3.2
@@ -89,11 +88,11 @@ require (
go.opencensus.io v0.24.0
go.uber.org/automaxprocs v1.5.2
go.uber.org/mock v0.4.0
golang.org/x/crypto v0.25.0
golang.org/x/crypto v0.23.0
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
golang.org/x/mod v0.19.0
golang.org/x/mod v0.17.0
golang.org/x/sync v0.7.0
golang.org/x/tools v0.23.0
golang.org/x/tools v0.21.0
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
google.golang.org/grpc v1.56.3
google.golang.org/protobuf v1.34.1
@@ -126,7 +125,6 @@ require (
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/dave/jennifer v1.7.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/deckarep/golang-set/v2 v2.5.0 // indirect
@@ -252,10 +250,10 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 // indirect
golang.org/x/net v0.27.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/oauth2 v0.16.0 // indirect
golang.org/x/term v0.22.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/term v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
golang.org/x/time v0.5.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
@@ -275,7 +273,7 @@ require (
github.com/go-playground/validator/v10 v10.13.0
github.com/peterh/liner v1.2.0 // indirect
github.com/prysmaticlabs/gohashtree v0.0.4-beta
golang.org/x/sys v0.22.0 // indirect
golang.org/x/sys v0.20.0 // indirect
google.golang.org/api v0.44.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
k8s.io/klog/v2 v2.80.0 // indirect
@@ -286,5 +284,3 @@ replace github.com/json-iterator/go => github.com/prestonvanloon/go v1.1.7-0.201

// See https://github.com/prysmaticlabs/grpc-gateway/issues/2
replace github.com/grpc-ecosystem/grpc-gateway/v2 => github.com/prysmaticlabs/grpc-gateway/v2 v2.3.1-0.20230315201114-09284ba20446

//replace github.com/OffchainLabs/methodical-ssz => /Users/kasey/src/OffchainLabs/methodical-ssz

32
go.sum
32
go.sum
@@ -73,8 +73,6 @@ github.com/MariusVanDerWijden/tx-fuzz v1.3.3-0.20240227085032-f70dd7c85c97/go.mo
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/OffchainLabs/methodical-ssz v0.0.0-20240712201410-cd5a2975775c h1:56W3xtZyeLcbKjt5staxK/jMdE6ql69rPhbb/XmZPhA=
|
||||
github.com/OffchainLabs/methodical-ssz v0.0.0-20240712201410-cd5a2975775c/go.mod h1:Ndc6p2Xsj0fV3Mx4Ufv32RZ4K9SvWhzhOxtMuTXDU1I=
|
||||
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
@@ -208,8 +206,6 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
|
||||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||
github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U=
|
||||
github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkEQxENCrlLo=
|
||||
github.com/dave/jennifer v1.7.0 h1:uRbSBH9UTS64yXbh4FrMHfgfY762RD+C7bUPKODpSJE=
|
||||
github.com/dave/jennifer v1.7.0/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -1242,8 +1238,8 @@ golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98y
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
|
||||
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
|
||||
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@@ -1286,8 +1282,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
|
||||
golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1348,8 +1344,8 @@ golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
|
||||
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/oauth2 v0.0.0-20170912212905-13449ad91cb2/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
@@ -1485,8 +1481,8 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -1498,8 +1494,8 @@ golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
|
||||
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
|
||||
golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1515,8 +1511,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.0.0-20170424234030-8be79e1e0910/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -1595,8 +1591,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
|
||||
golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
|
||||
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
|
||||
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
@@ -4,8 +4,7 @@
# Script to copy ssz.go files from bazel build folder to appropriate location.
# Bazel builds to bazel-bin/... folder, script copies them back to original folder where target is.

#bazel query 'kind(ssz_gen_marshal, //proto/...)' | xargs bazel build $@
bazel query 'kind(ssz_methodical, //proto/...) union kind(ssz_gen_marshal, //proto/...)' | xargs bazel build $@
bazel query 'kind(ssz_gen_marshal, //proto/...)' | xargs bazel build $@

# Get locations of proto ssz.go files.
file_list=()

@@ -12,7 +12,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
|
||||
load("//proto:ssz_proto_library.bzl", "ssz_proto_files")
|
||||
load("//tools:ssz.bzl", "SSZ_DEPS", "ssz_gen_marshal")
|
||||
load("//tools:methodical.bzl", "ssz_methodical")
|
||||
|
||||
proto_library(
|
||||
name = "proto",
|
||||
@@ -53,7 +52,6 @@ ssz_phase0_objs = [
|
||||
"AttestationData",
|
||||
"AttesterSlashing",
|
||||
"BeaconBlock",
|
||||
"BeaconBlockBody",
|
||||
"BeaconBlockHeader",
|
||||
"BeaconState",
|
||||
"Checkpoint",
|
||||
@@ -66,7 +64,6 @@ ssz_phase0_objs = [
|
||||
"ForkData",
|
||||
"HistoricalBatch",
|
||||
"IndexedAttestation",
|
||||
"PendingAttestation",
|
||||
"PowBlock",
|
||||
"ProposerSlashing",
|
||||
"SignedAggregateAttestationAndProof",
|
||||
@@ -75,6 +72,7 @@ ssz_phase0_objs = [
|
||||
"SignedVoluntaryExit",
|
||||
"SigningData",
|
||||
"Status",
|
||||
"Status",
|
||||
"Validator",
|
||||
"VoluntaryExit",
|
||||
]
|
||||
@@ -87,6 +85,7 @@ ssz_altair_objs = [
|
||||
"SignedBeaconBlockAltair",
|
||||
"SignedContributionAndProof",
|
||||
"SyncAggregate",
|
||||
"SyncAggregate",
|
||||
"SyncAggregatorSelectionData",
|
||||
"SyncCommittee",
|
||||
"SyncCommitteeContribution",
|
||||
@@ -115,6 +114,7 @@ ssz_capella_objs = [
|
||||
"SignedBLSToExecutionChange",
|
||||
"SignedBeaconBlockCapella",
|
||||
"SignedBlindedBeaconBlockCapella",
|
||||
"Withdrawal",
|
||||
]
|
||||
|
||||
ssz_deneb_objs = [
|
||||
@@ -138,13 +138,14 @@ ssz_electra_objs = [
|
||||
"AttestationElectra",
|
||||
"AttesterSlashingElectra",
|
||||
"BeaconBlockElectra",
|
||||
"BeaconBlockBodyElectra",
|
||||
"BeaconBlockElectra",
|
||||
"BeaconStateElectra",
|
||||
"BlindedBeaconBlockBodyElectra",
|
||||
"BlindedBeaconBlockElectra",
|
||||
"Consolidation",
|
||||
"IndexedAttestationElectra",
|
||||
"PendingBalanceDeposit",
|
||||
"PendingBalanceDeposits",
|
||||
"PendingConsolidation",
|
||||
"PendingPartialWithdrawal",
|
||||
"SignedAggregateAttestationAndProofElectra",
|
||||
@@ -153,124 +154,83 @@ ssz_electra_objs = [
|
||||
"SignedConsolidation",
|
||||
]
|
||||
|
||||
ssz_methodical(
|
||||
name = "methodical_phase0",
|
||||
deps = [":go_proto"],
|
||||
type_names = ssz_phase0_objs,
|
||||
target_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1",
|
||||
target_package_name = "eth",
|
||||
ssz_gen_marshal(
|
||||
name = "ssz_generated_phase0",
|
||||
go_proto = ":go_proto",
|
||||
out = "phase0.ssz.go",
|
||||
includes = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//math:go_default_library",
|
||||
],
|
||||
objs = ssz_phase0_objs,
|
||||
)
|
||||
|
||||
ssz_methodical(
|
||||
name = "methodical_altair",
|
||||
deps = [":go_proto"],
|
||||
type_names = ssz_altair_objs,
|
||||
target_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1",
|
||||
target_package_name = "eth",
|
||||
ssz_gen_marshal(
|
||||
name = "ssz_generated_altair",
|
||||
go_proto = ":go_proto",
|
||||
out = "altair.ssz.go",
|
||||
includes = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//math:go_default_library",
|
||||
],
|
||||
objs = ssz_altair_objs,
|
||||
exclude_objs = ssz_phase0_objs,
|
||||
)
|
||||
|
||||
ssz_methodical(
|
||||
name = "methodical_bellatrix",
|
||||
deps = [":go_proto"],
|
||||
type_names = ssz_bellatrix_objs,
|
||||
target_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1",
|
||||
target_package_name = "eth",
|
||||
ssz_gen_marshal(
|
||||
name = "ssz_generated_bellatrix",
|
||||
go_proto = ":go_proto",
|
||||
out = "bellatrix.ssz.go",
|
||||
includes = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//math:go_default_library",
|
||||
],
|
||||
objs = ssz_bellatrix_objs,
|
||||
exclude_objs = ssz_phase0_objs + ssz_altair_objs,
|
||||
)
|
||||
|
||||
ssz_methodical(
|
||||
name = "methodical_capella",
|
||||
deps = [":go_proto"],
|
||||
type_names = ssz_capella_objs,
|
||||
target_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1",
|
||||
target_package_name = "eth",
|
||||
ssz_gen_marshal(
|
||||
name = "ssz_generated_capella",
|
||||
go_proto = ":go_proto",
|
||||
out = "capella.ssz.go",
|
||||
includes = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//math:go_default_library",
|
||||
],
|
||||
objs = ssz_capella_objs,
|
||||
exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs,
|
||||
)
|
||||
|
||||
ssz_methodical(
|
||||
name = "methodical_deneb",
|
||||
deps = [":go_proto"],
|
||||
type_names = ssz_deneb_objs,
|
||||
target_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1",
|
||||
target_package_name = "eth",
|
||||
ssz_gen_marshal(
|
||||
name = "ssz_generated_deneb",
|
||||
go_proto = ":go_proto",
|
||||
out = "deneb.ssz.go",
|
||||
includes = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//math:go_default_library",
|
||||
],
|
||||
objs = ssz_deneb_objs,
|
||||
exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs + ssz_capella_objs,
|
||||
)
|
||||
|
||||
ssz_methodical(
|
||||
name = "methodical_electra",
|
||||
deps = [":go_proto"],
|
||||
type_names = ssz_electra_objs,
|
||||
target_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1",
|
||||
target_package_name = "eth",
|
||||
ssz_gen_marshal(
|
||||
name = "ssz_generated_electra",
|
||||
go_proto = ":go_proto",
|
||||
out = "electra.ssz.go",
|
||||
includes = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//math:go_default_library",
|
||||
],
|
||||
objs = ssz_electra_objs,
|
||||
exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs + ssz_capella_objs + ssz_deneb_objs,
|
||||
)
|
||||
|
||||
#ssz_gen_marshal(
|
||||
# name = "ssz_generated_altair",
|
||||
# go_proto = ":go_proto",
|
||||
# out = "altair.ssz.go",
|
||||
# includes = [
|
||||
# "//consensus-types/primitives:go_default_library",
|
||||
# "//proto/engine/v1:go_default_library",
|
||||
# "//math:go_default_library",
|
||||
# ],
|
||||
# objs = ssz_altair_objs,
|
||||
# exclude_objs = ssz_phase0_objs,
|
||||
#)
|
||||
|
||||
#ssz_gen_marshal(
|
||||
# name = "ssz_generated_bellatrix",
|
||||
# go_proto = ":go_proto",
|
||||
# out = "bellatrix.ssz.go",
|
||||
# includes = [
|
||||
# "//consensus-types/primitives:go_default_library",
|
||||
# "//proto/engine/v1:go_default_library",
|
||||
# "//math:go_default_library",
|
||||
# ],
|
||||
# objs = ssz_bellatrix_objs,
|
||||
# exclude_objs = ssz_phase0_objs + ssz_altair_objs,
|
||||
#)
|
||||
|
||||
#ssz_gen_marshal(
|
||||
# name = "ssz_generated_capella",
|
||||
# go_proto = ":go_proto",
|
||||
# out = "capella.ssz.go",
|
||||
# includes = [
|
||||
# "//consensus-types/primitives:go_default_library",
|
||||
# "//proto/engine/v1:go_default_library",
|
||||
# "//math:go_default_library",
|
||||
# ],
|
||||
# objs = ssz_capella_objs,
|
||||
# exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs,
|
||||
#)
|
||||
|
||||
#ssz_gen_marshal(
|
||||
# name = "ssz_generated_deneb",
|
||||
# go_proto = ":go_proto",
|
||||
# out = "deneb.ssz.go",
|
||||
# includes = [
|
||||
# "//consensus-types/primitives:go_default_library",
|
||||
# "//proto/engine/v1:go_default_library",
|
||||
# "//math:go_default_library",
|
||||
# ],
|
||||
# objs = ssz_deneb_objs,
|
||||
# exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs + ssz_capella_objs,
|
||||
#)
|
||||
|
||||
#ssz_gen_marshal(
|
||||
# name = "ssz_generated_electra",
|
||||
# go_proto = ":go_proto",
|
||||
# out = "electra.ssz.go",
|
||||
# includes = [
|
||||
# "//consensus-types/primitives:go_default_library",
|
||||
# "//proto/engine/v1:go_default_library",
|
||||
# "//math:go_default_library",
|
||||
# ],
|
||||
# objs = ssz_electra_objs,
|
||||
# exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs + ssz_capella_objs + ssz_deneb_objs,
|
||||
#)
|
||||
|
||||
ssz_gen_marshal(
|
||||
name = "ssz_generated_non_core",
|
||||
@@ -353,17 +313,12 @@ go_library(
|
||||
"sync_committee_mainnet.go",
|
||||
"sync_committee_minimal.go", # keep
|
||||
":ssz_generated_non_core", # keep
|
||||
#":ssz_generated_altair", # keep
|
||||
#":ssz_generated_bellatrix", # keep
|
||||
#":ssz_generated_capella", # keep
|
||||
#":ssz_generated_deneb", # keep
|
||||
#":ssz_generated_electra", # keep
|
||||
":methodical_phase0", # keep
|
||||
":methodical_altair", # keep
|
||||
":methodical_bellatrix", # keep
|
||||
":methodical_capella", # keep
|
||||
":methodical_deneb", # keep
|
||||
":methodical_electra", # keep
|
||||
":ssz_generated_phase0", # keep
|
||||
":ssz_generated_altair", # keep
|
||||
":ssz_generated_bellatrix", # keep
|
||||
":ssz_generated_capella", # keep
|
||||
":ssz_generated_deneb", # keep
|
||||
":ssz_generated_electra", # keep
|
||||
],
|
||||
embed = [
|
||||
":go_grpc_gateway_library",
|
||||
|
||||
File diff suppressed because it is too large
@@ -2,10 +2,14 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = ["attestation_utils.go"],
srcs = [
"attestation_utils.go",
"id.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -20,7 +24,10 @@ go_library(

go_test(
name = "go_default_test",
srcs = ["attestation_utils_test.go"],
srcs = [
"attestation_utils_test.go",
"id_test.go",
],
deps = [
":go_default_library",
"//config/fieldparams:go_default_library",
@@ -29,6 +36,7 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

@@ -12,6 +12,7 @@ go_library(
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation/aggregation:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation/aggregation"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// MaxCoverAttestationAggregation relies on Maximum Coverage greedy algorithm for aggregation.
@@ -171,11 +172,21 @@ func aggregateAttestations(atts []ethpb.Att, keys []int, coverage *bitfield.Bitl
}
}
// Put aggregated attestation at a position of the first selected attestation.
atts[targetIdx] = &ethpb.Attestation{
// Append size byte, which will be unnecessary on switch to Bitlist64.
AggregationBits: coverage.ToBitlist(),
Data: data,
Signature: aggregateSignatures(signs).Marshal(),
if atts[0].Version() == version.Phase0 {
atts[targetIdx] = &ethpb.Attestation{
// Append size byte, which will be unnecessary on switch to Bitlist64.
AggregationBits: coverage.ToBitlist(),
Data: data,
Signature: aggregateSignatures(signs).Marshal(),
}
} else {
atts[targetIdx] = &ethpb.AttestationElectra{
// Append size byte, which will be unnecessary on switch to Bitlist64.
AggregationBits: coverage.ToBitlist(),
CommitteeBits: atts[0].CommitteeBitsVal().Bytes(),
Data: data,
Signature: aggregateSignatures(signs).Marshal(),
}
}
return
}

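The hunk above makes the write-back of the aggregate version-aware, since Electra aggregates must also carry the committee bits of their inputs. A toy sketch of that shape with stand-in types (not Prysm's ethpb types; version numbers are illustrative):

package main

import "fmt"

// att is a toy stand-in for ethpb.Att with only the parts the branch above cares about.
type att interface {
	Version() int
	Bits() []byte
}

type phase0Att struct{ bits []byte }

func (a phase0Att) Version() int { return 0 }
func (a phase0Att) Bits() []byte { return a.bits }

type electraAtt struct {
	bits          []byte
	committeeBits []byte // Electra aggregates must carry the committee bits forward
}

func (a electraAtt) Version() int { return 1 }
func (a electraAtt) Bits() []byte { return a.bits }

// rebuild mirrors the shape of the diff: the aggregate written back keeps the same
// concrete type as the inputs, and for Electra it preserves the committee bits.
func rebuild(in att, merged []byte) att {
	if in.Version() == 0 {
		return phase0Att{bits: merged}
	}
	e, ok := in.(electraAtt)
	if !ok {
		return nil // unexpected type in this toy model
	}
	return electraAtt{bits: merged, committeeBits: e.committeeBits}
}

func main() {
	out := rebuild(electraAtt{bits: []byte{0b01}, committeeBits: []byte{0b1}}, []byte{0b11})
	fmt.Printf("%#v\n", out)
}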
71
proto/prysm/v1alpha1/attestation/id.go
Normal file
71
proto/prysm/v1alpha1/attestation/id.go
Normal file
@@ -0,0 +1,71 @@
package attestation

import (
"fmt"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// IdSource represents the part of attestation that will be used to generate the Id.
type IdSource uint8

const (
// Full generates the Id from the whole attestation.
Full IdSource = iota
// Data generates the Id from the tuple (slot, committee index, beacon block root, source, target).
Data
)

// Id represents an attestation ID. Its uniqueness depends on the IdSource provided when constructing the Id.
type Id [33]byte

// NewId --
func NewId(att ethpb.Att, source IdSource) (Id, error) {
if err := helpers.ValidateNilAttestation(att); err != nil {
return Id{}, err
}
if att.Version() < 0 || att.Version() > 255 {
return Id{}, errors.New("attestation version must be between 0 and 255")
}

var id Id
id[0] = byte(att.Version())

switch source {
case Full:
h, err := att.HashTreeRoot()
if err != nil {
return Id{}, err
}
copy(id[1:], h[:])
return id, nil
case Data:
data := att.GetData()
if att.Version() >= version.Electra {
committeeIndices := att.CommitteeBitsVal().BitIndices()
if len(committeeIndices) != 1 {
return Id{}, fmt.Errorf("%d committee bits are set instead of 1", len(committeeIndices))
}
dataCopy := ethpb.CopyAttestationData(att.GetData())
dataCopy.CommitteeIndex = primitives.CommitteeIndex(committeeIndices[0])
data = dataCopy
}
h, err := data.HashTreeRoot()
if err != nil {
return Id{}, err
}
copy(id[1:], h[:])
return id, nil
default:
return Id{}, errors.New("invalid source requested")
}
}

// String --
func (id Id) String() string {
return string(id[:])
}
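A minimal sketch of how the new helper might be called, mirroring the calls that id_test.go makes; util.HydrateAttestation is a test helper, so this is illustrative wiring rather than production usage. Data-keyed IDs group attestations that share the same data even when their aggregation bits differ, while Full hashes the whole object.

package main

import (
	"fmt"

	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func main() {
	// Hydrate a minimal Phase 0 attestation the same way the new id_test.go does.
	att := util.HydrateAttestation(&ethpb.Attestation{})

	// Data keys by the attestation data only, so two aggregates with different
	// bits but identical data share an ID.
	dataID, err := attestation.NewId(att, attestation.Data)
	if err != nil {
		panic(err)
	}

	// Full hashes the whole attestation, so the aggregation bits matter too.
	fullID, err := attestation.NewId(att, attestation.Full)
	if err != nil {
		panic(err)
	}

	fmt.Printf("version byte: %d, data-keyed: %x, full: %x\n", dataID[0], dataID[1:], fullID[1:])
}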
63
proto/prysm/v1alpha1/attestation/id_test.go
Normal file
63
proto/prysm/v1alpha1/attestation/id_test.go
Normal file
@@ -0,0 +1,63 @@
package attestation_test

import (
"testing"

"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestNewId(t *testing.T) {
t.Run("full source", func(t *testing.T) {
att := util.HydrateAttestation(&ethpb.Attestation{})
_, err := attestation.NewId(att, attestation.Full)
assert.NoError(t, err)
})
t.Run("data source Phase 0", func(t *testing.T) {
att := util.HydrateAttestation(&ethpb.Attestation{})
_, err := attestation.NewId(att, attestation.Data)
assert.NoError(t, err)
})
t.Run("data source Electra", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
att := util.HydrateAttestationElectra(&ethpb.AttestationElectra{CommitteeBits: cb})
_, err := attestation.NewId(att, attestation.Data)
assert.NoError(t, err)
})
t.Run("ID is different between versions", func(t *testing.T) {
phase0Att := util.HydrateAttestation(&ethpb.Attestation{})
phase0Id, err := attestation.NewId(phase0Att, attestation.Data)
require.NoError(t, err)
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true) // setting committee bit 0 for Electra corresponds to attestation data's committee index 0 for Phase 0
electraAtt := util.HydrateAttestationElectra(&ethpb.AttestationElectra{CommitteeBits: cb})
electraId, err := attestation.NewId(electraAtt, attestation.Data)
require.NoError(t, err)

assert.NotEqual(t, phase0Id, electraId)
})
t.Run("invalid source", func(t *testing.T) {
att := util.HydrateAttestation(&ethpb.Attestation{})
_, err := attestation.NewId(att, 123)
assert.ErrorContains(t, "invalid source requested", err)
})
t.Run("data source Electra - 0 bits set", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
att := util.HydrateAttestationElectra(&ethpb.AttestationElectra{CommitteeBits: cb})
_, err := attestation.NewId(att, attestation.Data)
assert.ErrorContains(t, "0 committee bits are set", err)
})
t.Run("data source Electra - multiple bits set", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
cb.SetBitAt(1, true)
att := util.HydrateAttestationElectra(&ethpb.AttestationElectra{CommitteeBits: cb})
_, err := attestation.NewId(att, attestation.Data)
assert.ErrorContains(t, "2 committee bits are set", err)
})
}
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,38 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
alias(
|
||||
name = "methodicalgen",
|
||||
actual = "@com_github_offchainlabs_methodical_ssz//cmd/ssz:ssz",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"bazel.go",
|
||||
"bazel_json_builder.go",
|
||||
"build_context.go",
|
||||
"driver_request.go",
|
||||
"flatpackage.go",
|
||||
"json_packages_driver.go",
|
||||
"main.go",
|
||||
"packageregistry.go",
|
||||
"utils.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/tools/genception",
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "genception",
|
||||
embed = [":go_default_library"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["gopackagesdriver_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = ["@io_bazel_rules_go//go/tools/bazel_testing:go_default_library"],
|
||||
)
|
||||
@@ -1,16 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary")
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["main.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/tools/genception/cmd",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["//tools/genception/driver:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "cmd",
|
||||
embed = [":go_default_library"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,86 +0,0 @@
|
||||
// Copyright 2021 The Bazel Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/tools/genception/driver"
|
||||
)
|
||||
|
||||
var log = driver.Logger
|
||||
|
||||
func run(_ context.Context, in io.Reader, out io.Writer, args []string) error {
|
||||
rec, err := driver.NewRecorder()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to initialize recorder: %w", err)
|
||||
}
|
||||
resolver, err := driver.NewPathResolver()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to initialize path resolver: %w", err)
|
||||
}
|
||||
jsonFiles, err := driver.LoadJsonListing()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to lookup package: %w", err)
|
||||
}
|
||||
pd, err := driver.NewJSONPackagesDriver(jsonFiles, resolver.Resolve)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load JSON files: %w", err)
|
||||
}
|
||||
|
||||
request, err := driver.ReadDriverRequest(in)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to read request: %w", err)
|
||||
}
|
||||
if err := rec.RecordRequest(args, request); err != nil {
|
||||
return fmt.Errorf("unable to record request: %w", err)
|
||||
}
|
||||
// Note: we are returning all files required to build a specific package.
|
||||
// For file queries (`file=`), this means that the CompiledGoFiles will
|
||||
// include more than the only file being specified.
|
||||
resp := pd.Handle(request, args)
|
||||
if err := rec.RecordResponse(resp); err != nil {
|
||||
return fmt.Errorf("unable to record response: %w", err)
|
||||
}
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to marshal response: %v", err)
|
||||
}
|
||||
_, err = out.Write(data)
|
||||
return err
|
||||
}
|
||||
|
||||
func main() {
|
||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
|
||||
defer stop()
|
||||
|
||||
log.WithField("args", strings.Join(os.Args[1:], " ")).Info("genception lookup")
|
||||
if err := run(ctx, os.Stdin, os.Stdout, os.Args[1:]); err != nil {
|
||||
_, err := fmt.Fprintf(os.Stderr, "error: %v", err)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("unhandled error in package resolution")
|
||||
}
|
||||
// gopls will check the packages driver exit code, and if there is an
|
||||
// error, it will fall back to go list. Obviously we don't want that,
|
||||
// so force a 0 exit code.
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"bazel_json_builder.go",
|
||||
"build_context.go",
|
||||
"driver_request.go",
|
||||
"flatpackage.go",
|
||||
"index.go",
|
||||
"json_packages_driver.go",
|
||||
"logger.go",
|
||||
"packageregistry.go",
|
||||
"recorder.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/tools/genception/driver",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"index_test.go",
|
||||
"packageregistry_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
deps = ["//testing/require:go_default_library"],
|
||||
)
|
||||
@@ -1,156 +0,0 @@
|
||||
// Copyright 2021 The Bazel Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var RulesGoStdlibLabel = "@io_bazel_rules_go//:stdlib"
|
||||
|
||||
/*
|
||||
type BazelJSONBuilder struct {
|
||||
packagesBaseDir string
|
||||
includeTests bool
|
||||
}
|
||||
|
||||
|
||||
var _defaultKinds = []string{"go_library", "go_test", "go_binary"}
|
||||
|
||||
var externalRe = regexp.MustCompile(`.*\/external\/([^\/]+)(\/(.*))?\/([^\/]+.go)`)
|
||||
|
||||
func (b *BazelJSONBuilder) fileQuery(filename string) string {
|
||||
label := filename
|
||||
|
||||
if strings.HasPrefix(filename, "./") {
|
||||
label = strings.TrimPrefix(filename, "./")
|
||||
}
|
||||
|
||||
if matches := externalRe.FindStringSubmatch(filename); len(matches) == 5 {
|
||||
// if filepath is for a third party lib, we need to know, what external
|
||||
// library this file is part of.
|
||||
matches = append(matches[:2], matches[3:]...)
|
||||
label = fmt.Sprintf("@%s//%s", matches[1], strings.Join(matches[2:], ":"))
|
||||
}
|
||||
|
||||
relToBin, err := filepath.Rel(b.bazel.info["output_path"], filename)
|
||||
if err == nil && !strings.HasPrefix(relToBin, "../") {
|
||||
parts := strings.SplitN(relToBin, string(filepath.Separator), 3)
|
||||
relToBin = parts[2]
|
||||
// We've effectively converted filename from bazel-bin/some/path.go to some/path.go;
|
||||
// Check if a BUILD.bazel files exists under this dir, if not walk up and repeat.
|
||||
relToBin = filepath.Dir(relToBin)
|
||||
_, err = os.Stat(filepath.Join(b.bazel.WorkspaceRoot(), relToBin, "BUILD.bazel"))
|
||||
for errors.Is(err, os.ErrNotExist) && relToBin != "." {
|
||||
relToBin = filepath.Dir(relToBin)
|
||||
_, err = os.Stat(filepath.Join(b.bazel.WorkspaceRoot(), relToBin, "BUILD.bazel"))
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// return package path found and build all targets (codegen doesn't fall under go_library)
|
||||
// Otherwise fallback to default
|
||||
if relToBin == "." {
|
||||
relToBin = ""
|
||||
}
|
||||
label = fmt.Sprintf("//%s:all", relToBin)
|
||||
}
|
||||
}
|
||||
|
||||
return label
|
||||
}
|
||||
|
||||
func isLocalImport(path string) bool {
|
||||
return path == "." || path == ".." ||
|
||||
strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../") ||
|
||||
filepath.IsAbs(path)
|
||||
}
|
||||
|
||||
func NewBazelJSONBuilder(includeTests bool) (*BazelJSONBuilder, error) {
|
||||
return &BazelJSONBuilder{
|
||||
includeTests: includeTests,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *BazelJSONBuilder) Labels(ctx context.Context, requests []string) ([]string, error) {
|
||||
ret := make([]string, 0, len(requests))
|
||||
for _, request := range requests {
|
||||
result := ""
|
||||
if strings.HasSuffix(request, ".go") {
|
||||
f := strings.TrimPrefix(request, "file=")
|
||||
result = b.fileQuery(f)
|
||||
} else if request == "builtin" || request == "std" {
|
||||
result = fmt.Sprintf(RulesGoStdlibLabel)
|
||||
}
|
||||
|
||||
if result != "" {
|
||||
ret = append(ret, result)
|
||||
}
|
||||
}
|
||||
if len(ret) == 0 {
|
||||
return []string{RulesGoStdlibLabel}, nil
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (b *BazelJSONBuilder) PathResolver() PathResolverFunc {
|
||||
return func(p string) string {
|
||||
p = strings.Replace(p, "__BAZEL_EXECROOT__", os.Getenv("PWD"), 1)
|
||||
p = strings.Replace(p, "__BAZEL_OUTPUT_BASE__", b.packagesBaseDir, 1)
|
||||
return p
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
func NewPathResolver() (*PathResolver, error) {
|
||||
outBase, err := PackagesBaseFromEnv()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &PathResolver{
|
||||
execRoot: os.Getenv("PWD"),
|
||||
outputBase: outBase,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type PathResolver struct {
|
||||
outputBase string
|
||||
execRoot string
|
||||
}
|
||||
|
||||
const (
|
||||
prefixExecRoot = "__BAZEL_EXECROOT__"
|
||||
prefixOutputBase = "__BAZEL_OUTPUT_BASE__"
|
||||
prefixWorkspace = "__BAZEL_WORKSPACE__"
|
||||
)
|
||||
|
||||
var prefixes = []string{prefixExecRoot, prefixOutputBase, prefixWorkspace}
|
||||
|
||||
func (r PathResolver) Resolve(path string) string {
|
||||
for _, prefix := range prefixes {
|
||||
if strings.HasPrefix(path, prefix) {
|
||||
for _, rpl := range []string{r.execRoot, r.outputBase} {
|
||||
rp := strings.Replace(path, prefix, rpl, 1)
|
||||
_, err := os.Stat(rp)
|
||||
if err == nil {
|
||||
return rp
|
||||
}
|
||||
}
|
||||
return path
|
||||
}
|
||||
}
|
||||
log.WithField("path", path).Warn("unrecognized path prefix when resolving source paths in json import metadata")
|
||||
return path
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
package driver
|
||||
|
||||
import (
|
||||
"go/build"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var buildContext = makeBuildContext()
|
||||
|
||||
func makeBuildContext() *build.Context {
|
||||
bctx := build.Default
|
||||
bctx.BuildTags = strings.Split(getenvDefault("GOTAGS", ""), ",")
|
||||
|
||||
return &bctx
|
||||
}
|
||||
|
||||
func filterSourceFilesForTags(files []string) []string {
|
||||
ret := make([]string, 0, len(files))
|
||||
|
||||
for _, f := range files {
|
||||
dir, filename := filepath.Split(f)
|
||||
ext := filepath.Ext(f)
|
||||
|
||||
match, err := buildContext.MatchFile(dir, filename)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("file", f).Warn("error matching file")
|
||||
}
|
||||
// MatchFile filters out anything without a file extension. In the
|
||||
// case of CompiledGoFiles (in particular gco processed files from
|
||||
// the cache), we want them.
|
||||
if match || ext == "" {
|
||||
ret = append(ret, f)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
func getenvDefault(key, defaultValue string) string {
|
||||
if v, ok := os.LookupEnv(key); ok {
|
||||
return v
|
||||
}
|
||||
return defaultValue
|
||||
}
|
||||
@@ -1,91 +0,0 @@
|
||||
// Copyright 2021 The Bazel Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// From https://pkg.go.dev/golang.org/x/tools/go/packages#LoadMode
|
||||
type LoadMode int
|
||||
|
||||
// Only NeedExportsFile is needed in our case
|
||||
const (
|
||||
// NeedName adds Name and PkgPath.
|
||||
NeedName LoadMode = 1 << iota
|
||||
|
||||
// NeedFiles adds GoFiles and OtherFiles.
|
||||
NeedFiles
|
||||
|
||||
// NeedCompiledGoFiles adds CompiledGoFiles.
|
||||
NeedCompiledGoFiles
|
||||
|
||||
// NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain
|
||||
// "placeholder" Packages with only the ID set.
|
||||
NeedImports
|
||||
|
||||
// NeedDeps adds the fields requested by the LoadMode in the packages in Imports.
|
||||
NeedDeps
|
||||
|
||||
// NeedExportsFile adds ExportFile.
|
||||
NeedExportFile
|
||||
|
||||
// NeedTypes adds Types, Fset, and IllTyped.
|
||||
NeedTypes
|
||||
|
||||
// NeedSyntax adds Syntax.
|
||||
NeedSyntax
|
||||
|
||||
// NeedTypesInfo adds TypesInfo.
|
||||
NeedTypesInfo
|
||||
|
||||
// NeedTypesSizes adds TypesSizes.
|
||||
NeedTypesSizes
|
||||
|
||||
// typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
|
||||
// Modifies CompiledGoFiles and Types, and has no effect on its own.
|
||||
typecheckCgo
|
||||
|
||||
// NeedModule adds Module.
|
||||
NeedModule
|
||||
)
|
||||
|
||||
// Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
|
||||
const NeedExportsFile = NeedExportFile
|
||||
|
||||
// From https://github.com/golang/tools/blob/v0.1.0/go/packages/external.go#L32
|
||||
// Most fields are disabled since there is no need for them
|
||||
type DriverRequest struct {
|
||||
Mode LoadMode `json:"mode"`
|
||||
// Env specifies the environment the underlying build system should be run in.
|
||||
// Env []string `json:"env"`
|
||||
// BuildFlags are flags that should be passed to the underlying build system.
|
||||
// BuildFlags []string `json:"build_flags"`
|
||||
// Tests specifies whether the patterns should also return test packages.
|
||||
Tests bool `json:"tests"`
|
||||
// Overlay maps file paths (relative to the driver's working directory) to the byte contents
|
||||
// of overlay files.
|
||||
// Overlay map[string][]byte `json:"overlay"`
|
||||
}
|
||||
|
||||
func ReadDriverRequest(r io.Reader) (*DriverRequest, error) {
|
||||
req := &DriverRequest{}
|
||||
if err := json.NewDecoder(r).Decode(&req); err != nil {
|
||||
return nil, fmt.Errorf("unable to decode driver request: %w", err)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
@@ -1,217 +0,0 @@
// Copyright 2021 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package driver

import (
    "encoding/json"
    "fmt"
    "go/parser"
    "go/token"
    "os"
    "strconv"
    "strings"
)

type ResolvePkgFunc func(importPath string) string

// Copy and pasted from golang.org/x/tools/go/packages
type FlatPackagesError struct {
    Pos string // "file:line:col" or "file:line" or "" or "-"
    Msg string
    Kind FlatPackagesErrorKind
}

type FlatPackagesErrorKind int

const (
    UnknownError FlatPackagesErrorKind = iota
    ListError
    ParseError
    TypeError
)

func (err FlatPackagesError) Error() string {
    pos := err.Pos
    if pos == "" {
        pos = "-" // like token.Position{}.String()
    }
    return pos + ": " + err.Msg
}

// FlatPackage is the JSON form of Package
// It drops all the type and syntax fields, and transforms the Imports
type FlatPackage struct {
    ID string
    Name string `json:",omitempty"`
    PkgPath string `json:",omitempty"`
    Errors []FlatPackagesError `json:",omitempty"`
    GoFiles []string `json:",omitempty"`
    CompiledGoFiles []string `json:",omitempty"`
    OtherFiles []string `json:",omitempty"`
    ExportFile string `json:",omitempty"`
    Imports map[string]string `json:",omitempty"`
    Standard bool `json:",omitempty"`
}

type (
    PackageFunc func(pkg *FlatPackage)
    PathResolverFunc func(path string) string
)

func resolvePathsInPlace(prf PathResolverFunc, paths []string) {
    for i, path := range paths {
        paths[i] = prf(path)
    }
}

func WalkFlatPackagesFromJSON(jsonFile string, onPkg PackageFunc) error {
    f, err := os.Open(jsonFile)
    if err != nil {
        return fmt.Errorf("unable to open package JSON file: %w", err)
    }
    defer func() {
        if err := f.Close(); err != nil {
            log.WithError(err).WithField("file", f.Name()).Error("unable to close file")
        }
    }()

    decoder := json.NewDecoder(f)
    for decoder.More() {
        pkg := &FlatPackage{}
        if err := decoder.Decode(&pkg); err != nil {
            return fmt.Errorf("unable to decode package in %s: %w", f.Name(), err)
        }

        onPkg(pkg)
    }
    return nil
}

func (fp *FlatPackage) ResolvePaths(prf PathResolverFunc) {
    resolvePathsInPlace(prf, fp.CompiledGoFiles)
    resolvePathsInPlace(prf, fp.GoFiles)
    resolvePathsInPlace(prf, fp.OtherFiles)
    fp.ExportFile = prf(fp.ExportFile)
}

// FilterFilesForBuildTags filters the source files given the current build
// tags.
func (fp *FlatPackage) FilterFilesForBuildTags() {
    fp.GoFiles = filterSourceFilesForTags(fp.GoFiles)
    fp.CompiledGoFiles = filterSourceFilesForTags(fp.CompiledGoFiles)
}

func (fp *FlatPackage) filterTestSuffix(files []string) (err error, testFiles []string, xTestFiles, nonTestFiles []string) {
    for _, filename := range files {
        if strings.HasSuffix(filename, "_test.go") {
            fset := token.NewFileSet()
            f, err := parser.ParseFile(fset, filename, nil, parser.PackageClauseOnly)
            if err != nil {
                return err, nil, nil, nil
            }
            if f.Name.Name == fp.Name {
                testFiles = append(testFiles, filename)
            } else {
                xTestFiles = append(xTestFiles, filename)
            }
        } else {
            nonTestFiles = append(nonTestFiles, filename)
        }
    }
    return
}

func (fp *FlatPackage) MoveTestFiles() *FlatPackage {
    err, tgf, xtgf, gf := fp.filterTestSuffix(fp.GoFiles)

    if err != nil {
        return nil
    }
    fp.GoFiles = append(gf, tgf...)
    fp.CompiledGoFiles = append(gf, tgf...)

    if len(xtgf) == 0 {
        return nil
    }

    newImports := make(map[string]string, len(fp.Imports))
    for k, v := range fp.Imports {
        newImports[k] = v
    }

    newImports[fp.PkgPath] = fp.ID

    // Clone package, only xtgf files
    return &FlatPackage{
        ID: fp.ID + "_xtest",
        Name: fp.Name + "_test",
        PkgPath: fp.PkgPath + "_test",
        Imports: newImports,
        Errors: fp.Errors,
        GoFiles: append([]string{}, xtgf...),
        CompiledGoFiles: append([]string{}, xtgf...),
        OtherFiles: fp.OtherFiles,
        ExportFile: fp.ExportFile,
        Standard: fp.Standard,
    }
}

func (fp *FlatPackage) IsStdlib() bool {
    return fp.Standard
}

func (fp *FlatPackage) ResolveImports(resolve ResolvePkgFunc) error {
    // Stdlib packages are already complete import wise
    if fp.IsStdlib() {
        return nil
    }

    fset := token.NewFileSet()

    for _, file := range fp.CompiledGoFiles {
        f, err := parser.ParseFile(fset, file, nil, parser.ImportsOnly)
        if err != nil {
            return err
        }
        // If the name is not provided, fetch it from the sources
        if fp.Name == "" {
            fp.Name = f.Name.Name
        }

        for _, rawImport := range f.Imports {
            imp, err := strconv.Unquote(rawImport.Path.Value)
            if err != nil {
                continue
            }
            // We don't handle CGo for now
            if imp == "C" {
                continue
            }
            if _, ok := fp.Imports[imp]; ok {
                continue
            }

            if pkgID := resolve(imp); pkgID != "" {
                fp.Imports[imp] = pkgID
            }
        }
    }

    return nil
}

func (fp *FlatPackage) IsRoot() bool {
    return strings.HasPrefix(fp.ID, "//")
}
@@ -1,56 +0,0 @@
package driver

import (
    "encoding/json"
    "os"

    "github.com/pkg/errors"
)

const (
    ENV_JSON_INDEX_PATH = "PACKAGE_JSON_INVENTORY"
    ENV_PACKAGES_BASE = "PACKAGES_BASE"
)

var ErrUnsetEnvVar = errors.New("required env var not set")

// LoadJsonListing reads the list of json package index files created by the bazel gopackagesdriver aspect:
// https://github.com/bazelbuild/rules_go/blob/master/go/tools/gopackagesdriver/aspect.bzl
// This list is serialized as a []string of paths, relative to the bazel exec root.
func LoadJsonListing() ([]string, error) {
    path, err := JsonIndexPathFromEnv()
    if err != nil {
        return nil, err
    }
    return ReadJsonIndex(path)
}

func ReadJsonIndex(path string) ([]string, error) {
    um := make([]string, 0)
    b, err := os.ReadFile(path)
    if err != nil {
        return nil, err
    }
    if err := json.Unmarshal(b, &um); err != nil {
        return nil, err
    }

    return um, nil
}

// JsonIndexPathFromEnv reads the path to the json index file from the environment.
func JsonIndexPathFromEnv() (string, error) {
    p := os.Getenv(ENV_JSON_INDEX_PATH)
    if p == "" {
        return "", errors.Wrap(ErrUnsetEnvVar, ENV_JSON_INDEX_PATH)
    }
    return p, nil
}

func PackagesBaseFromEnv() (string, error) {
    p := os.Getenv(ENV_PACKAGES_BASE)
    if p == "" {
        return "", errors.Wrap(ErrUnsetEnvVar, ENV_PACKAGES_BASE)
    }
    return p, nil
}
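A hypothetical caller (not in the diff) combining the two env helpers above; the bazel rule is expected to export both variables before the driver runs, and loadFromEnv is an invented name.

// Sketch only: read the JSON index list and the base path in one step.
func loadFromEnv() ([]string, string, error) {
    // PACKAGE_JSON_INVENTORY points at the index of pkg.json files written by the aspect.
    files, err := LoadJsonListing()
    if err != nil {
        return nil, "", err
    }
    // PACKAGES_BASE is the directory the exec-root-relative paths resolve against.
    base, err := PackagesBaseFromEnv()
    if err != nil {
        return nil, "", err
    }
    return files, base, nil
}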
@@ -1,58 +0,0 @@
package driver

import (
    "fmt"
    "testing"

    "github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestJsonList(t *testing.T) {
    path := "testdata/json-list.json"
    files, err := ReadJsonIndex(path)
    require.NoError(t, err)
    require.Equal(t, 4, len(files))
}

func TestJsonIndexPathFromEnv(t *testing.T) {
    cases := []struct {
        val string
        err error
        envname string
        getter func() (string, error)
    }{
        {
            getter: JsonIndexPathFromEnv,
            err: ErrUnsetEnvVar,
        },
        {
            getter: JsonIndexPathFromEnv,
            envname: ENV_JSON_INDEX_PATH,
            val: "/path/to/file",
        },
        {
            getter: PackagesBaseFromEnv,
            err: ErrUnsetEnvVar,
        },
        {
            getter: PackagesBaseFromEnv,
            envname: ENV_PACKAGES_BASE,
            val: "/path/to/base",
        },
    }

    for i, c := range cases {
        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
            if c.envname != "" {
                t.Setenv(c.envname, c.val)
            }
            v, err := c.getter()
            if c.err != nil {
                require.ErrorIs(t, err, c.err)
                return
            }
            require.NoError(t, err)
            require.Equal(t, c.val, v)
        })
    }
}
@@ -1,98 +0,0 @@
// Copyright 2021 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package driver

import (
    "fmt"
    "runtime"
)

type JSONPackagesDriver struct {
    registry *PackageRegistry
}

func NewJSONPackagesDriver(jsonFiles []string, prf PathResolverFunc) (*JSONPackagesDriver, error) {
    jpd := &JSONPackagesDriver{
        registry: NewPackageRegistry(),
    }

    for _, f := range jsonFiles {
        if err := WalkFlatPackagesFromJSON(f, func(pkg *FlatPackage) {
            jpd.registry.Add(pkg)
        }); err != nil {
            return nil, fmt.Errorf("unable to walk json: %w", err)
        }
    }

    if err := jpd.registry.ResolvePaths(prf); err != nil {
        return nil, fmt.Errorf("unable to resolve paths: %w", err)
    }

    if err := jpd.registry.ResolveImports(); err != nil {
        return nil, fmt.Errorf("unable to resolve imports: %w", err)
    }

    return jpd, nil
}

func (b *JSONPackagesDriver) Handle(req *DriverRequest, queries []string) *driverResponse {
    r, p := b.registry.Query(req, queries)
    return &driverResponse{
        NotHandled: false,
        Compiler: "gc",
        Arch: runtime.GOARCH,
        Roots: r,
        Packages: p,
    }
}

func (b *JSONPackagesDriver) GetResponse(labels []string) *driverResponse {
    rootPkgs, packages := b.registry.Match(labels)

    return &driverResponse{
        NotHandled: false,
        Compiler: "gc",
        Arch: runtime.GOARCH,
        Roots: rootPkgs,
        Packages: packages,
    }
}

type driverResponse struct {
    // NotHandled is returned if the request can't be handled by the current
    // driver. If an external driver returns a response with NotHandled, the
    // rest of the driverResponse is ignored, and go/packages will fall back
    // to the next driver. If go/packages is extended in the future to support
    // lists of multiple drivers, go/packages will fall back to the next driver.
    NotHandled bool

    // Compiler and Arch are the arguments to pass to types.SizesFor
    // to get a types.Sizes to use when type checking.
    Compiler string
    Arch string

    // Roots is the set of package IDs that make up the root packages.
    // We have to encode this separately because when we encode a single package
    // we cannot know if it is one of the roots as that requires knowledge of the
    // graph it is part of.
    Roots []string `json:",omitempty"`

    // Packages is the full set of packages in the graph.
    // The packages are not connected into a graph.
    // The Imports if populated will be stubs that only have their ID set.
    // Imports will be connected and then type and syntax information added in a
    // later pass (see refine).
    Packages []*FlatPackage
}
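For context, a hypothetical end-to-end sketch (not in the diff) tying the pieces above together; respondForLabels and the filepath.Join-based path resolver are assumptions, and "path/filepath" would need to be imported.

// Sketch only: build the driver from the JSON index and answer a set of bazel labels.
func respondForLabels(labels []string) (*driverResponse, error) {
    jsonFiles, err := LoadJsonListing()
    if err != nil {
        return nil, err
    }
    base, err := PackagesBaseFromEnv()
    if err != nil {
        return nil, err
    }
    // Resolve exec-root-relative paths from the pkg.json files against the base dir.
    jpd, err := NewJSONPackagesDriver(jsonFiles, func(p string) string {
        return filepath.Join(base, p)
    })
    if err != nil {
        return nil, err
    }
    return jpd.GetResponse(labels), nil
}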
@@ -1,26 +0,0 @@
package driver

import (
    "os"
    "path/filepath"

    "github.com/sirupsen/logrus"
)

// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()
var Logger *logrus.Logger

func init() {
    path := os.Getenv("GOPACKAGESDRIVER_LOG_PATH")
    if path == "" {
        path = filepath.Join(os.Getenv("PWD"), "genception.log")
    }
    file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
    if err == nil {
        log.Out = file
    } else {
        log.Info("Failed to log to file, using default stderr")
    }
    Logger = log
}
@@ -1,193 +0,0 @@
// Copyright 2021 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package driver

import (
    "fmt"
    "strings"
)

type PackageRegistry struct {
    packages map[string]*FlatPackage
    stdlib map[string]string
}

func NewPackageRegistry(pkgs ...*FlatPackage) *PackageRegistry {
    pr := &PackageRegistry{
        packages: map[string]*FlatPackage{},
        stdlib: map[string]string{},
    }
    pr.Add(pkgs...)
    return pr
}

func rewritePackage(pkg *FlatPackage) {
    pkg.ID = pkg.PkgPath
    for k := range pkg.Imports {
        // rewrite package ID mapping to be the same as the path
        pkg.Imports[k] = k
    }
}

// returns true if a is a superset of b
func isSuperset(a, b []string) bool {
    if len(a) < len(b) {
        return false
    }
    bi := 0
    for i := range a {
        if a[i] == b[bi] {
            bi++
            if bi == len(b) {
                return true
            }
        }
    }
    return false
}

// Update merges the contents of 2 packages together in the instance where they have the same package path.
// This can happen when the gopackages aspect traverses to a child label and generates separate json files for transitive targets.
// For example, in //proto/prysm/v1alpha1 we see both `:go_default_library` and `:go_proto` from `//proto/engine/v1`.
// Without the merge, `:go_proto` can overwrite `:go_default_library`, leaving source files out of the final graph.
func (pr *PackageRegistry) Update(pkg *FlatPackage) {
    existing, ok := pr.packages[pkg.PkgPath]
    if !ok {
        pr.packages[pkg.PkgPath] = pkg
        return
    }
    if isSuperset(pkg.GoFiles, existing.GoFiles) {
        existing.GoFiles = pkg.GoFiles
    }
}
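An illustrative-only example of the merge rule (the package path and file names are made up): when two pkg.json entries share a PkgPath, the entry whose GoFiles list is a superset replaces the other's file list.

// Sketch only: the second Update wins because its GoFiles are a superset of the first's.
func exampleUpdate() *FlatPackage {
    pr := NewPackageRegistry()
    pr.Update(&FlatPackage{PkgPath: "proto/engine/v1", GoFiles: []string{"a.pb.go"}})
    pr.Update(&FlatPackage{PkgPath: "proto/engine/v1", GoFiles: []string{"a.pb.go", "helpers.go"}})
    return pr.packages["proto/engine/v1"] // GoFiles now holds both files
}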

func (pr *PackageRegistry) Add(pkgs ...*FlatPackage) *PackageRegistry {
    for _, pkg := range pkgs {
        rewritePackage(pkg)
        pr.packages[pkg.PkgPath] = pkg

        if pkg.IsStdlib() {
            pr.stdlib[pkg.PkgPath] = pkg.ID
        }
    }
    return pr
}

func (pr *PackageRegistry) ResolvePaths(prf PathResolverFunc) error {
    for _, pkg := range pr.packages {
        pkg.ResolvePaths(prf)
        pkg.FilterFilesForBuildTags()
    }
    return nil
}

// ResolveImports adds stdlib imports to packages. This is required because
// stdlib packages are not part of the JSON file exports as bazel is unaware of
// them.
func (pr *PackageRegistry) ResolveImports() error {
    resolve := func(importPath string) string {
        if pkgID, ok := pr.stdlib[importPath]; ok {
            return pkgID
        }

        return ""
    }

    for _, pkg := range pr.packages {
        if err := pkg.ResolveImports(resolve); err != nil {
            return err
        }
        testFp := pkg.MoveTestFiles()
        if testFp != nil {
            pr.packages[testFp.ID] = testFp
        }
    }

    return nil
}

func (pr *PackageRegistry) walk(acc map[string]*FlatPackage, root string) {
    pkg := pr.packages[root]

    if pkg == nil {
        log.WithField("root", root).Error("package ID not found")
        return
    }

    acc[pkg.ID] = pkg
    for _, pkgID := range pkg.Imports {
        if _, ok := acc[pkgID]; !ok {
            pr.walk(acc, pkgID)
        }
    }
}

func (pr *PackageRegistry) Query(req *DriverRequest, queries []string) ([]string, []*FlatPackage) {
    walkedPackages := map[string]*FlatPackage{}
    retRoots := make([]string, 0, len(queries))
    for _, rootPkg := range queries {
        retRoots = append(retRoots, rootPkg)
        pr.walk(walkedPackages, rootPkg)
    }

    retPkgs := make([]*FlatPackage, 0, len(walkedPackages))
    for _, pkg := range walkedPackages {
        retPkgs = append(retPkgs, pkg)
    }

    return retRoots, retPkgs
}

func (pr *PackageRegistry) Match(labels []string) ([]string, []*FlatPackage) {
    roots := map[string]struct{}{}

    for _, label := range labels {
        // When packagesdriver is run from rules_go, rulesGoRepositoryName will just be @
        if !strings.HasPrefix(label, "@") {
            // Canonical labels are only available since Bazel 6.0.0
            label = fmt.Sprintf("@%s", label)
        }

        if label == RulesGoStdlibLabel {
            // For stdlib, we need to append all the subpackages as roots
            // since RulesGoStdLibLabel doesn't actually show up in the stdlib pkg.json
            for _, pkg := range pr.packages {
                if pkg.Standard {
                    roots[pkg.ID] = struct{}{}
                }
            }
        } else {
            roots[label] = struct{}{}
            // If an xtest package exists for this package add it to the roots
            if _, ok := pr.packages[label+"_xtest"]; ok {
                roots[label+"_xtest"] = struct{}{}
            }
        }
    }

    walkedPackages := map[string]*FlatPackage{}
    retRoots := make([]string, 0, len(roots))
    for rootPkg := range roots {
        retRoots = append(retRoots, rootPkg)
        pr.walk(walkedPackages, rootPkg)
    }

    retPkgs := make([]*FlatPackage, 0, len(walkedPackages))
    for _, pkg := range walkedPackages {
        retPkgs = append(retPkgs, pkg)
    }

    return retRoots, retPkgs
}
@@ -1,27 +0,0 @@
package driver

import (
    "strings"
    "testing"
)

func TestIsSuperset(t *testing.T) {
    cases := []struct {
        a []string
        b []string
        expected bool
    }{
        {[]string{"a", "b", "c", "d"}, []string{"a", "b"}, true},
        {[]string{"a", "b", "c", "d"}, []string{"a", "b", "c", "d"}, true},
        {[]string{"a", "b", "c", "d"}, []string{"a", "b", "c", "d", "e"}, false},
        {[]string{"a", "b", "c", "d"}, []string{"a", "b", "c"}, true},
        {[]string{}, []string{"a"}, false},
    }
    for _, c := range cases {
        t.Run(strings.Join(c.a, "_")+"__"+strings.Join(c.b, "_"), func(t *testing.T) {
            if isSuperset(c.a, c.b) != c.expected {
                t.Errorf("isSuperset(%v, %v) != %v", c.a, c.b, c.expected)
            }
        })
    }
}
@@ -1,52 +0,0 @@
package driver

import (
    "encoding/json"
    "os"
    "path"
    "strconv"
    "time"
)

type Recorder struct {
    base string
    t time.Time
}

func NewRecorder() (*Recorder, error) {
    base := os.Getenv("PWD")
    r := &Recorder{base: base, t: time.Now()}
    if err := r.Mkdir(); err != nil {
        return nil, err
    }
    return r, nil
}
func (r *Recorder) Dir() string {
    return path.Join(r.base, strconv.FormatInt(r.t.UTC().UnixNano(), 10))
}

func (r *Recorder) Mkdir() error {
    return os.MkdirAll(r.Dir(), 0755)
}

func (r *Recorder) RecordRequest(args []string, req *DriverRequest) error {
    b, err := json.Marshal(struct {
        Args []string
        Request *DriverRequest
    }{
        Args: args,
        Request: req,
    })
    if err != nil {
        return err
    }
    return os.WriteFile(path.Join(r.Dir(), "request.json"), b, 0644)
}

func (r *Recorder) RecordResponse(resp *driverResponse) error {
    b, err := json.Marshal(resp)
    if err != nil {
        return err
    }
    return os.WriteFile(path.Join(r.Dir(), "response.json"), b, 0644)
}
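A hypothetical debugging hook (not in the diff) showing how the Recorder might wrap one Handle call; handleWithRecording is an invented name.

// Sketch only: persist the request and response around a single driver call.
func handleWithRecording(jpd *JSONPackagesDriver, args []string, req *DriverRequest) (*driverResponse, error) {
    rec, err := NewRecorder()
    if err != nil {
        return nil, err
    }
    if err := rec.RecordRequest(args, req); err != nil {
        return nil, err
    }
    resp := jpd.Handle(req, args)
    if err := rec.RecordResponse(resp); err != nil {
        return nil, err
    }
    return resp, nil
}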
@@ -1,6 +0,0 @@
[
    "bazel-out/darwin_arm64-fastbuild/bin/external/io_bazel_rules_go/stdlib_/stdlib.pkg.json",
    "bazel-out/darwin_arm64-fastbuild/bin/external/com_github_thomaso_mirodin_intmath/constants/c64/c64.pkg.json",
    "bazel-out/darwin_arm64-fastbuild/bin/external/com_github_thomaso_mirodin_intmath/u64/u64.pkg.json",
    "bazel-out/darwin_arm64-fastbuild/bin/proto/prysm/v1alpha1/go_proto.pkg.json"
]
@@ -1,127 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "GoLibrary", "GoSource", "go_context")
load("@io_bazel_rules_go//go/tools/gopackagesdriver:aspect.bzl", "go_pkg_info_aspect", "GoPkgInfo")

_METHODICAL_TOOL = Label("//tools/genception:methodicalgen")
_GENCEPTION_TOOL = Label("//tools/genception/cmd:cmd")
_FASTSSZ_DEP = Label("@com_github_prysmaticlabs_fastssz//:go_default_library")

def _ssz_methodical_impl(ctx):
    go_ctx = go_context(ctx)
    all_json_files = {}
    stdlib = ''
    inputs = []
    #inputs += go_ctx.sdk.srcs
    #inputs += go_ctx.sdk.headers + go_ctx.sdk.srcs + go_ctx.sdk.tools
    ssz_sources = go_ctx.library_to_source(go_ctx, ctx.attr, ctx.attr.fastssz_lib[GoLibrary], ctx.coverage_instrumented())
    inputs += ssz_sources.srcs
    #sample = go_ctx.sdk.srcs[0].path
    for dep in ctx.attr.deps + [ctx.attr.fastssz_lib]:
        pkginfo = dep[OutputGroupInfo]
        if hasattr(pkginfo, "go_generated_srcs"):
            inputs += pkginfo.go_generated_srcs.to_list()
        # collect all the paths to json files dict keys for uniqueness
        json_files = pkginfo.go_pkg_driver_json_file.to_list()
        inputs += json_files
        if len(json_files) > 0:
            for jf in json_files:
                # presumably path is full path from exec root
                all_json_files[jf.path] = ""
        inputs += pkginfo.go_pkg_driver_srcs.to_list()
        inputs += pkginfo.go_pkg_driver_export_file.to_list()
        # we just need to get the stdlib once
        #if stdlib == '' and hasattr(pkginfo, "go_pkg_driver_stdlib_json_file"):
        if stdlib == '':
            std_ds = pkginfo.go_pkg_driver_stdlib_json_file.to_list()
            if len(std_ds) > 0:
                stdlib = std_ds[0].path
                inputs += std_ds
    # concat the stdlib with all the other json file paths and write to disk
    json_out = [stdlib] + all_json_files.keys()
    all_pkg_list = ctx.actions.declare_file("methodical-pkg-list.json")
    ctx.actions.write(all_pkg_list, content = json.encode(json_out))
    #echo "sample = {sample}" &&
    #echo "{out_base}" &&
    out_base = ctx.outputs.out.root.path

    args = [
        "gen",
        "--type-names=" + ",".join(ctx.attr.type_names),
        "--output=" + ctx.outputs.out.path,
    ]
    if ctx.attr.target_package_name != "":
        args.append("--override-package-name=" + ctx.attr.target_package_name)

    # Positional arg, needs to be after other --flags.
    args.append(ctx.attr.target_package)

    codegen_bins = [ctx.file.genception, ctx.file.methodical_tool]
    ctx.actions.run_shell(
        env = {
            "PACKAGE_JSON_INVENTORY": all_pkg_list.path,
            "PACKAGES_BASE": out_base,
            # GOCACHE is required starting in Go 1.12
            "GOCACHE": "./.gocache",
            "GOPACKAGESDRIVER": ctx.file.genception.path,
            "GOPACKAGESDRIVER_LOG_PATH": out_base + "/gopackagesdriver.log",
        },

        inputs = [all_pkg_list] + inputs + codegen_bins,
        outputs = [ctx.outputs.out],
        command = """
        echo $PACKAGE_JSON_INVENTORY &&
        echo $PACKAGES_BASE &&
        echo $PWD &&
        {cmd} {args}
        """.format(
            #sample = sample,
            out_base = out_base,
            json_list = all_pkg_list.path,
            cmd = "$(pwd)/" + ctx.file.methodical_tool.path,
            args = " ".join(args),
            out = ctx.outputs.out.path,
        ),
    )

ssz_methodical = rule(
    implementation = _ssz_methodical_impl,
    attrs = {
        "type_names": attr.string_list(
            allow_empty = False,
            doc = "The names of the Go types to generate methods for.",
            mandatory = True,
        ),
        'deps' : attr.label_list(aspects = [go_pkg_info_aspect]),
        "out": attr.output(
            doc = "The new Go file to emit the generated mocks into",
        ),
        "_go_context_data": attr.label(
            default = "@io_bazel_rules_go//:go_context_data",
        ),
        "methodical_tool": attr.label(
            doc = "The methodical tool (binary) to run",
            default = _METHODICAL_TOOL,
            allow_single_file = True,
            executable = True,
            cfg = "exec",
            mandatory = False,
        ),
        "fastssz_lib": attr.label(providers = [GoLibrary], default = _FASTSSZ_DEP, aspects = [go_pkg_info_aspect]),
        "target_package": attr.string(
            doc = "The package path containing the types in type_names.",
            mandatory = True,
        ),
        "target_package_name": attr.string(
            doc = "Override the name of the package the generated file is in (eg 'eth' for proto/prysm/v1alpha1)",
            mandatory = False,
        ),
        "genception": attr.label(
            doc = "gopackagesdriver tool for package discovery inside bazel sandbox",
            default = _GENCEPTION_TOOL,
            allow_single_file = True,
            executable = True,
            cfg = "exec",
            mandatory = False,
        ),
    },
    toolchains = ["@io_bazel_rules_go//go:toolchain"],
)
@@ -43,13 +43,14 @@ func selectAccounts(selectionPrompt string, pubKeys [][fieldparams.BLSPubkeyLeng
    results := make([]int, 0)
    au := aurora.NewAurora(true)
    if len(pubKeyStrings) > 5 {
        log.Warnf("there are more than %d potential public keys to exit, please consider using the --%s or --%s flags", 5, flags.VoluntaryExitPublicKeysFlag.Name, flags.ExitAllFlag.Name)
        log.Warnf("There are more than %d potential public keys to exit, please consider using the --%s or --%s flags", 5, flags.VoluntaryExitPublicKeysFlag.Name, flags.ExitAllFlag.Name)
    }
    log.Infof("Found a total of %d keys", len(pubKeyStrings))
    for result != exit {
        p := promptui.Select{
            Label: selectionPrompt,
            HideSelected: true,
            Size: len(pubKeyStrings),
            Size: 10, // Display 10 items at a time.
            Items: append([]string{exit, allAccountsText}, pubKeyStrings...),
            Templates: templates,
        }