Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-26 21:59:13 -05:00)

Compare commits: add-proces...testing-ch (6 commits)
Commits:

- e31ab0a09d
- 520733ba55
- bcf060619b
- 37b27fdd3c
- 6a9bcbab3a
- 1b2524b0fe
@@ -10,7 +10,10 @@ go_library(
     ],
     importpath = "github.com/OffchainLabs/prysm/v7/api/client",
     visibility = ["//visibility:public"],
-    deps = ["@com_github_pkg_errors//:go_default_library"],
+    deps = [
+        "@com_github_pkg_errors//:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
+    ],
 )
 
 go_test(
@@ -8,6 +8,7 @@ import (
 	"net/url"
 
 	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
 )
 
 const (
@@ -28,6 +29,7 @@ type Client struct {
 // `host` is the base host + port used to construct request urls. This value can be
 // a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
 func NewClient(host string, opts ...ClientOpt) (*Client, error) {
+	log.Info("hi")
 	u, err := urlForHost(host)
 	if err != nil {
 		return nil, err
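The client hunks above add a logrus dependency and a placeholder `log.Info("hi")` call inside NewClient. A small sketch of the structured-logging style the new import enables; the helper name and the `host` field are illustrative, not part of the diff:

```go
package client

import (
	log "github.com/sirupsen/logrus"
)

// logClientInit is a hypothetical helper: it uses the aliased logrus logger from the
// hunk above with a structured field instead of a bare placeholder message.
func logClientInit(host string) {
	log.WithField("host", host).Info("Initialized API client")
}
```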
@@ -3,7 +3,6 @@ package altair
 import (
 	"context"
 
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	"github.com/OffchainLabs/prysm/v7/config/params"
@@ -24,7 +23,7 @@ func ProcessPreGenesisDeposits(
 	if err != nil {
 		return nil, errors.Wrap(err, "could not process deposit")
 	}
-	beaconState, err = blocks.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
+	beaconState, err = helpers.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
 	if err != nil {
 		return nil, err
 	}
@@ -37,7 +36,7 @@ func ProcessDeposits(
 	beaconState state.BeaconState,
 	deposits []*ethpb.Deposit,
 ) (state.BeaconState, error) {
-	allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
+	allSignaturesVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
 	if err != nil {
 		return nil, err
 	}
@@ -82,7 +81,7 @@ func ProcessDeposits(
 // signature=deposit.data.signature,
 // )
 func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
-	if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
+	if err := helpers.VerifyDeposit(beaconState, deposit); err != nil {
 		if deposit == nil || deposit.Data == nil {
 			return nil, err
 		}
@@ -122,7 +121,7 @@ func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, allSi
 	index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
 	if !ok {
 		if !allSignaturesVerified {
-			valid, err := blocks.IsValidDepositSignature(data)
+			valid, err := helpers.IsValidDepositSignature(data)
 			if err != nil {
 				return nil, err
 			}
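The altair hunks above only swap the import path from `core/blocks` to `core/helpers`, but they show the shape of the deposit flow after the move. A minimal sketch of that flow, using only the signatures visible in the hunks (the surrounding block-processing context is omitted and the wrapper function name is illustrative):

```go
package example

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// processBlockDeposits is a condensed sketch of the batch-verify-then-fallback pattern
// ProcessDeposits follows: verify all deposit signatures in one batch, and only if that
// batch check fails let ProcessDeposit re-check each signature individually.
func processBlockDeposits(ctx context.Context, st state.BeaconState, deposits []*ethpb.Deposit) (state.BeaconState, error) {
	allVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
	if err != nil {
		return nil, err
	}
	for _, d := range deposits {
		// allVerified == false forces per-deposit signature verification inside ProcessDeposit.
		st, err = altair.ProcessDeposit(st, d, allVerified)
		if err != nil {
			return nil, err
		}
	}
	return st, nil
}
```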
@@ -5,7 +5,6 @@ go_library(
|
||||
srcs = [
|
||||
"attestation.go",
|
||||
"attester_slashing.go",
|
||||
"deposit.go",
|
||||
"error.go",
|
||||
"eth1_data.go",
|
||||
"exit.go",
|
||||
@@ -34,8 +33,6 @@ go_library(
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//contracts/deposit:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
@@ -62,7 +59,6 @@ go_test(
|
||||
"attester_slashing_test.go",
|
||||
"block_operations_fuzz_test.go",
|
||||
"block_regression_test.go",
|
||||
"deposit_test.go",
|
||||
"eth1_data_test.go",
|
||||
"exit_test.go",
|
||||
"exports_test.go",
|
||||
@@ -91,7 +87,6 @@ go_test(
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
|
||||
@@ -3,6 +3,7 @@ package blocks
 import (
 	"testing"
 
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
 	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -318,7 +319,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
 		fuzzer.Fuzz(deposit)
 		s, err := state_native.InitializeFromProtoUnsafePhase0(state)
 		require.NoError(t, err)
-		err = VerifyDeposit(s, deposit)
+		err = helpers.VerifyDeposit(s, deposit)
 		_ = err
 		fuzz.FreeMemory(i)
 	}
@@ -20,7 +20,6 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
|
||||
@@ -3,7 +3,6 @@ package electra
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
@@ -37,7 +36,7 @@ func ProcessDeposits(
|
||||
defer span.End()
|
||||
// Attempt to verify all deposit signatures at once, if this fails then fall back to processing
|
||||
// individual deposits with signature verification enabled.
|
||||
allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
|
||||
allSignaturesVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify deposit signatures in batch")
|
||||
}
|
||||
@@ -82,7 +81,7 @@ func ProcessDeposits(
|
||||
// signature=deposit.data.signature,
|
||||
// )
|
||||
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
|
||||
if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
|
||||
if err := helpers.VerifyDeposit(beaconState, deposit); err != nil {
|
||||
if deposit == nil || deposit.Data == nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -377,7 +376,7 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
|
||||
return nil
|
||||
}
|
||||
|
||||
allSignaturesVerified, err := blocks.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
|
||||
allSignaturesVerified, err := helpers.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "batch signature verification failed")
|
||||
}
|
||||
@@ -386,7 +385,7 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
|
||||
validSig := allSignaturesVerified
|
||||
|
||||
if !allSignaturesVerified {
|
||||
validSig, err = blocks.IsValidDepositSignature(ðpb.Deposit_Data{
|
||||
validSig, err = helpers.IsValidDepositSignature(ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.SafeCopyBytes(pd.PublicKey),
|
||||
WithdrawalCredentials: bytesutil.SafeCopyBytes(pd.WithdrawalCredentials),
|
||||
Amount: pd.Amount,
|
||||
@@ -441,7 +440,7 @@ func ApplyPendingDeposit(ctx context.Context, st state.BeaconState, deposit *eth
|
||||
defer span.End()
|
||||
index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(deposit.PublicKey))
|
||||
if !ok {
|
||||
verified, err := blocks.IsValidDepositSignature(ðpb.Deposit_Data{
|
||||
verified, err := helpers.IsValidDepositSignature(ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.SafeCopyBytes(deposit.PublicKey),
|
||||
WithdrawalCredentials: bytesutil.SafeCopyBytes(deposit.WithdrawalCredentials),
|
||||
Amount: deposit.Amount,
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
"attestation.go",
|
||||
"beacon_committee.go",
|
||||
"block.go",
|
||||
"deposit.go",
|
||||
"genesis.go",
|
||||
"legacy.go",
|
||||
"log.go",
|
||||
@@ -23,6 +24,7 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
@@ -31,6 +33,7 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//contracts/deposit:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
@@ -54,6 +57,7 @@ go_test(
|
||||
"attestation_test.go",
|
||||
"beacon_committee_test.go",
|
||||
"block_test.go",
|
||||
"deposit_test.go",
|
||||
"legacy_test.go",
|
||||
"private_access_fuzz_noop_test.go", # keep
|
||||
"private_access_test.go",
|
||||
@@ -72,6 +76,7 @@ go_test(
|
||||
tags = ["CI_race_detection"],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
@@ -80,6 +85,8 @@ go_test(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -1,4 +1,4 @@
-package blocks
+package helpers
 
 import (
 	"context"
@@ -1,9 +1,9 @@
|
||||
package blocks_test
|
||||
package helpers_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -45,7 +45,7 @@ func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) {
|
||||
|
||||
deposit.Proof = proof
|
||||
require.NoError(t, err)
|
||||
verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
|
||||
verified, err := helpers.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified)
|
||||
}
|
||||
@@ -68,7 +68,7 @@ func TestBatchVerifyDepositsSignatures_InvalidSignature(t *testing.T) {
|
||||
|
||||
deposit.Proof = proof
|
||||
require.NoError(t, err)
|
||||
verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
|
||||
verified, err := helpers.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, verified)
|
||||
}
|
||||
@@ -99,7 +99,7 @@ func TestVerifyDeposit_MerkleBranchFailsVerification(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
want := "deposit root did not verify"
|
||||
err = blocks.VerifyDeposit(beaconState, deposit)
|
||||
err = helpers.VerifyDeposit(beaconState, deposit)
|
||||
require.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
@@ -123,7 +123,7 @@ func TestIsValidDepositSignature_Ok(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
sig := sk.Sign(sr[:])
|
||||
depositData.Signature = sig.Marshal()
|
||||
valid, err := blocks.IsValidDepositSignature(depositData)
|
||||
valid, err := helpers.IsValidDepositSignature(depositData)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, valid)
|
||||
}
|
||||
@@ -163,7 +163,7 @@ func TestBatchVerifyPendingDepositsSignatures_Ok(t *testing.T) {
|
||||
sig2 := sk2.Sign(sr2[:])
|
||||
pendingDeposit2.Signature = sig2.Marshal()
|
||||
|
||||
verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
|
||||
verified, err := helpers.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified)
|
||||
}
|
||||
@@ -174,7 +174,7 @@ func TestBatchVerifyPendingDepositsSignatures_InvalidSignature(t *testing.T) {
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit})
|
||||
verified, err := helpers.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, verified)
|
||||
}
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api"
|
||||
@@ -26,7 +25,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v7/network/httputil"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
@@ -1044,112 +1042,27 @@ func (s *Server) GetBlockRoot(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockRoot")
|
||||
defer span.End()
|
||||
|
||||
var err error
|
||||
var root []byte
|
||||
blockID := r.PathValue("block_id")
|
||||
if blockID == "" {
|
||||
httputil.HandleError(w, "block_id is required in URL params", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
switch blockID {
|
||||
case "head":
|
||||
root, err = s.ChainInfoFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not retrieve head root: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if root == nil {
|
||||
httputil.HandleError(w, "No head root was found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
case "finalized":
|
||||
finalized := s.ChainInfoFetcher.FinalizedCheckpt()
|
||||
root = finalized.Root
|
||||
case "genesis":
|
||||
blk, err := s.BeaconDB.GenesisBlock(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not retrieve genesis block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(blk); err != nil {
|
||||
httputil.HandleError(w, "Could not find genesis block: "+err.Error(), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not hash genesis block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
root = blkRoot[:]
|
||||
default:
|
||||
isHex := strings.HasPrefix(blockID, "0x")
|
||||
if isHex {
|
||||
blockIDBytes, err := hexutil.Decode(blockID)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not decode block ID into bytes: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if len(blockIDBytes) != fieldparams.RootLength {
|
||||
httputil.HandleError(w, fmt.Sprintf("Block ID has length %d instead of %d", len(blockIDBytes), fieldparams.RootLength), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
blockID32 := bytesutil.ToBytes32(blockIDBytes)
|
||||
blk, err := s.BeaconDB.Block(ctx, blockID32)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not retrieve block for block root %#x: %v", blockID, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(blk); err != nil {
|
||||
httputil.HandleError(w, "Could not find block: "+err.Error(), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
root = blockIDBytes
|
||||
} else {
|
||||
slot, err := strconv.ParseUint(blockID, 10, 64)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not parse block ID: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
hasRoots, roots, err := s.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not retrieve blocks for slot %d: %v", slot, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if !hasRoots {
|
||||
httputil.HandleError(w, "Could not find any blocks with given slot", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
root = roots[0][:]
|
||||
if len(roots) == 1 {
|
||||
break
|
||||
}
|
||||
for _, blockRoot := range roots {
|
||||
canonical, err := s.ChainInfoFetcher.IsCanonical(ctx, blockRoot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not determine if block root is canonical: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if canonical {
|
||||
root = blockRoot[:]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
root, err := s.Blocker.BlockRoot(ctx, []byte(blockID))
|
||||
if !shared.WriteBlockRootFetchError(w, err) {
|
||||
return
|
||||
}
|
||||
|
||||
b32Root := bytesutil.ToBytes32(root)
|
||||
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, b32Root)
|
||||
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
response := &structs.BlockRootResponse{
|
||||
Data: &structs.BlockRoot{
|
||||
Root: hexutil.Encode(root),
|
||||
Root: hexutil.Encode(root[:]),
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Finalized: s.FinalizationFetcher.IsFinalized(ctx, b32Root),
|
||||
Finalized: s.FinalizationFetcher.IsFinalized(ctx, root),
|
||||
}
|
||||
httputil.WriteJson(w, response)
|
||||
}
|
||||
|
||||
@@ -2509,6 +2509,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
HeadFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
FinalizationFetcher: mockChainFetcher,
|
||||
Blocker: &lookup.BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
},
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
@@ -2524,7 +2528,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: map[string]string{"block_id": "3bad0"},
|
||||
wantErr: "Could not parse block ID",
|
||||
wantErr: "Invalid block ID",
|
||||
wantCode: http.StatusBadRequest,
|
||||
},
|
||||
{
|
||||
@@ -2572,7 +2576,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: map[string]string{"block_id": hexutil.Encode(bytesutil.PadTo([]byte("hi there"), 32))},
|
||||
wantErr: "Could not find block",
|
||||
wantErr: "Block not found",
|
||||
wantCode: http.StatusNotFound,
|
||||
},
|
||||
{
|
||||
@@ -2585,7 +2589,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
{
|
||||
name: "no block",
|
||||
blockID: map[string]string{"block_id": "105"},
|
||||
wantErr: "Could not find any blocks with given slot",
|
||||
wantErr: "Block not found",
|
||||
wantCode: http.StatusNotFound,
|
||||
},
|
||||
}
|
||||
@@ -2633,6 +2637,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
HeadFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
FinalizationFetcher: mockChainFetcher,
|
||||
Blocker: &lookup.BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
@@ -2668,6 +2676,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
HeadFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
FinalizationFetcher: mockChainFetcher,
|
||||
Blocker: &lookup.BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
},
|
||||
}
|
||||
t.Run("true", func(t *testing.T) {
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
|
||||
@@ -26,21 +26,30 @@ func WriteStateFetchError(w http.ResponseWriter, err error) {
	httputil.HandleError(w, "Could not get state: "+err.Error(), http.StatusInternalServerError)
}

// WriteBlockFetchError writes an appropriate error based on the supplied argument.
// The argument error should be a result of fetching block.
func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
// writeBlockIdError handles common block ID lookup errors.
// Returns true if an error was handled and written to the response, false if no error.
func writeBlockIdError(w http.ResponseWriter, err error, fallbackMsg string) bool {
	if err == nil {
		return false
	}
	var blockNotFoundErr *lookup.BlockNotFoundError
	if errors.As(err, &blockNotFoundErr) {
		httputil.HandleError(w, "Block not found: "+blockNotFoundErr.Error(), http.StatusNotFound)
		return false
		return true
	}
	var invalidBlockIdErr *lookup.BlockIdParseError
	if errors.As(err, &invalidBlockIdErr) {
		httputil.HandleError(w, "Invalid block ID: "+invalidBlockIdErr.Error(), http.StatusBadRequest)
		return false
		return true
	}
	if err != nil {
		httputil.HandleError(w, "Could not get block from block ID: "+err.Error(), http.StatusInternalServerError)
		httputil.HandleError(w, fallbackMsg+": "+err.Error(), http.StatusInternalServerError)
		return true
	}

// WriteBlockFetchError writes an appropriate error based on the supplied argument.
// The argument error should be a result of fetching block.
func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
	if writeBlockIdError(w, err, "Could not get block from block ID") {
		return false
	}
	if err = blocks.BeaconBlockIsNil(blk); err != nil {
@@ -49,3 +58,10 @@ func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBe
	}
	return true
}

// WriteBlockRootFetchError writes an appropriate error based on the supplied argument.
// The argument error should be a result of fetching block root.
// Returns true if no error occurred, false otherwise.
func WriteBlockRootFetchError(w http.ResponseWriter, err error) bool {
	return !writeBlockIdError(w, err, "Could not get block root from block ID")
}
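A sketch of how a handler is expected to use the new helper, mirroring the rewritten GetBlockRoot earlier in this diff. The import paths for the `shared` and `lookup` packages are inferred rather than taken from the hunks, and the JSON encoding is simplified:

```go
package example

import (
	"net/http"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/shared" // path assumed
	"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/lookup"     // path assumed
	"github.com/ethereum/go-ethereum/common/hexutil"
)

// handleBlockRoot resolves the root through the Blocker and lets
// WriteBlockRootFetchError map lookup errors to 400/404/500 responses.
func handleBlockRoot(blocker lookup.Blocker, w http.ResponseWriter, r *http.Request) {
	root, err := blocker.BlockRoot(r.Context(), []byte(r.PathValue("block_id")))
	if !shared.WriteBlockRootFetchError(w, err) {
		return // an error response has already been written
	}
	w.Header().Set("Content-Type", "application/json")
	_, _ = w.Write([]byte(`{"data":{"root":"` + hexutil.Encode(root[:]) + `"}}`))
}
```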
@@ -105,3 +105,59 @@ func TestWriteBlockFetchError(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestWriteBlockRootFetchError tests the WriteBlockRootFetchError function
|
||||
// to ensure that the correct error message and code are written to the response
|
||||
// and that the function returns the correct boolean value.
|
||||
func TestWriteBlockRootFetchError(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
err error
|
||||
expectedMessage string
|
||||
expectedCode int
|
||||
expectedReturn bool
|
||||
}{
|
||||
{
|
||||
name: "Nil error should return true",
|
||||
err: nil,
|
||||
expectedReturn: true,
|
||||
},
|
||||
{
|
||||
name: "BlockNotFoundError should return 404",
|
||||
err: lookup.NewBlockNotFoundError("block not found at slot 123"),
|
||||
expectedMessage: "Block not found",
|
||||
expectedCode: http.StatusNotFound,
|
||||
expectedReturn: false,
|
||||
},
|
||||
{
|
||||
name: "BlockIdParseError should return 400",
|
||||
err: &lookup.BlockIdParseError{},
|
||||
expectedMessage: "Invalid block ID",
|
||||
expectedCode: http.StatusBadRequest,
|
||||
expectedReturn: false,
|
||||
},
|
||||
{
|
||||
name: "Generic error should return 500",
|
||||
err: errors.New("database connection failed"),
|
||||
expectedMessage: "Could not get block root from block ID",
|
||||
expectedCode: http.StatusInternalServerError,
|
||||
expectedReturn: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
writer := httptest.NewRecorder()
|
||||
result := WriteBlockRootFetchError(writer, c.err)
|
||||
|
||||
assert.Equal(t, c.expectedReturn, result, "incorrect return value")
|
||||
if !c.expectedReturn {
|
||||
assert.Equal(t, c.expectedCode, writer.Code, "incorrect status code")
|
||||
assert.StringContains(t, c.expectedMessage, writer.Body.String(), "incorrect error message")
|
||||
|
||||
e := &httputil.DefaultJsonError{}
|
||||
assert.NoError(t, json.Unmarshal(writer.Body.Bytes(), e), "failed to unmarshal response")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -60,6 +60,7 @@ func (e BlockIdParseError) Error() string {
 // Blocker is responsible for retrieving blocks.
 type Blocker interface {
 	Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error)
+	BlockRoot(ctx context.Context, id []byte) ([fieldparams.RootLength]byte, error)
 	BlobSidecars(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError)
 	Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([][]byte, *core.RpcError)
 	DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError)
@@ -225,6 +226,18 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
 	return blk, nil
 }
 
+// BlockRoot returns the block root for a given identifier. The identifier can be one of:
+// - "head" (canonical head in node's view)
+// - "genesis"
+// - "finalized"
+// - "justified"
+// - <slot>
+// - <hex encoded block root with '0x' prefix>
+func (p *BeaconDbBlocker) BlockRoot(ctx context.Context, id []byte) ([fieldparams.RootLength]byte, error) {
+	root, _, err := p.resolveBlockID(ctx, string(id))
+	return root, err
+}
+
 // blobsContext holds common information needed for blob retrieval
 type blobsContext struct {
 	root [fieldparams.RootLength]byte
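The doc comment above lists the identifier forms BlockRoot accepts; a small usage sketch covering them follows. Constructing a concrete BeaconDbBlocker (BeaconDB, ChainInfoFetcher, ...) is omitted, and the hex root value is illustrative:

```go
package example

import (
	"context"
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/lookup" // path assumed
)

// resolveRoots walks through the identifier forms the BlockRoot method accepts.
func resolveRoots(ctx context.Context, blocker lookup.Blocker) error {
	for _, id := range []string{
		"head",      // canonical head in the node's view
		"finalized", // finalized checkpoint root
		"justified", // current justified checkpoint root
		"genesis",   // genesis block root
		"12345",     // a slot number
		"0x1c35ba9c8aa1d7e1d0c2af5ed490e6d1c0e8b9b1f2a3c4d5e6f708192a3b4c5d", // a 32-byte root
	} {
		root, err := blocker.BlockRoot(ctx, []byte(id))
		if err != nil {
			return fmt.Errorf("resolving %q: %w", id, err)
		}
		fmt.Printf("%s -> %#x\n", id, root)
	}
	return nil
}
```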
@@ -168,6 +168,111 @@ func TestGetBlock(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockRoot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := t.Context()
|
||||
|
||||
genBlk, blkContainers := testutil.FillDBWithBlocks(ctx, t, beaconDB)
|
||||
canonicalRoots := make(map[[32]byte]bool)
|
||||
|
||||
for _, bContr := range blkContainers {
|
||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
||||
}
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
|
||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
fetcher := &BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: &mockChain.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CurrentJustifiedCheckPoint: ðpb.Checkpoint{Root: blkContainers[32].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
},
|
||||
}
|
||||
|
||||
genesisRoot, err := genBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blockID []byte
|
||||
want [32]byte
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
blockID: []byte("30"),
|
||||
want: bytesutil.ToBytes32(blkContainers[30].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: []byte("3bad0"),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "head",
|
||||
blockID: []byte("head"),
|
||||
want: bytesutil.ToBytes32(headBlock.BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "finalized",
|
||||
blockID: []byte("finalized"),
|
||||
want: bytesutil.ToBytes32(blkContainers[64].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "justified",
|
||||
blockID: []byte("justified"),
|
||||
want: bytesutil.ToBytes32(blkContainers[32].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
blockID: []byte("genesis"),
|
||||
want: genesisRoot,
|
||||
},
|
||||
{
|
||||
name: "genesis root",
|
||||
blockID: genesisRoot[:],
|
||||
want: genesisRoot,
|
||||
},
|
||||
{
|
||||
name: "root",
|
||||
blockID: blkContainers[20].BlockRoot,
|
||||
want: bytesutil.ToBytes32(blkContainers[20].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "hex root",
|
||||
blockID: []byte(hexutil.Encode(blkContainers[20].BlockRoot)),
|
||||
want: bytesutil.ToBytes32(blkContainers[20].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: bytesutil.PadTo([]byte("hi there"), 32),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no block at slot",
|
||||
blockID: []byte("105"),
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := fetcher.BlockRoot(ctx, tt.blockID)
|
||||
if tt.wantErr {
|
||||
assert.NotEqual(t, err, nil, "no error has been returned")
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, tt.want, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlobsErrorHandling(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
 // MockBlocker is a fake implementation of lookup.Blocker.
 type MockBlocker struct {
 	BlockToReturn interfaces.ReadOnlySignedBeaconBlock
+	RootToReturn  [32]byte
 	ErrorToReturn error
 	SlotBlockMap  map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock
 	RootBlockMap  map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
@@ -39,6 +40,14 @@ func (m *MockBlocker) Block(_ context.Context, b []byte) (interfaces.ReadOnlySig
 	return m.SlotBlockMap[primitives.Slot(slotNumber)], nil
 }
 
+// BlockRoot --
+func (m *MockBlocker) BlockRoot(_ context.Context, _ []byte) ([32]byte, error) {
+	if m.ErrorToReturn != nil {
+		return [32]byte{}, m.ErrorToReturn
+	}
+	return m.RootToReturn, nil
+}
+
 // BlobSidecars --
 func (*MockBlocker) BlobSidecars(_ context.Context, _ string, _ ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError) {
 	return nil, &core.RpcError{}
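A sketch of how the new MockBlocker fields could drive a test; the mock package import path is assumed, while the field names, `lookup.NewBlockNotFoundError`, and the assertion helpers come from the hunks in this diff:

```go
package example

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/lookup"
	mock "github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/testutil" // mock package path assumed
	"github.com/OffchainLabs/prysm/v7/testing/assert"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

// TestMockBlockerRoot shows RootToReturn and ErrorToReturn feeding the BlockRoot stub.
func TestMockBlockerRoot(t *testing.T) {
	want := [32]byte{0x01, 0x02}
	m := &mock.MockBlocker{RootToReturn: want}

	// MockBlocker still satisfies the extended lookup.Blocker interface.
	var _ lookup.Blocker = m

	got, err := m.BlockRoot(t.Context(), []byte("head"))
	require.NoError(t, err)
	assert.DeepEqual(t, want, got)

	// Any configured error takes precedence over the canned root.
	m.ErrorToReturn = lookup.NewBlockNotFoundError("missing")
	_, err = m.BlockRoot(t.Context(), []byte("head"))
	assert.NotEqual(t, err, nil, "expected an error")
}
```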
@@ -113,77 +113,100 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
withdrawals := make([]*enginev1.Withdrawal, 0, params.BeaconConfig().MaxWithdrawalsPerPayload)
|
||||
validatorIndex := b.nextWithdrawalValidatorIndex
|
||||
withdrawalIndex := b.nextWithdrawalIndex
|
||||
epoch := slots.ToEpoch(b.slot)
|
||||
|
||||
// Electra partial withdrawals functionality.
|
||||
var processedPartialWithdrawalsCount uint64
|
||||
if b.version >= version.Electra {
|
||||
for _, w := range b.pendingPartialWithdrawals {
|
||||
if w.WithdrawableEpoch > epoch || len(withdrawals) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
|
||||
break
|
||||
}
|
||||
|
||||
v, err := b.validatorAtIndexReadOnly(w.Index)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to determine withdrawals at index %d: %w", w.Index, err)
|
||||
}
|
||||
vBal, err := b.balanceAtIndex(w.Index)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
|
||||
}
|
||||
hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
|
||||
var totalWithdrawn uint64
|
||||
for _, wi := range withdrawals {
|
||||
if wi.ValidatorIndex == w.Index {
|
||||
totalWithdrawn += wi.Amount
|
||||
}
|
||||
}
|
||||
balance, err := mathutil.Sub64(vBal, totalWithdrawn)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
|
||||
}
|
||||
hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
|
||||
if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
|
||||
amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
|
||||
withdrawals = append(withdrawals, &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: w.Index,
|
||||
Address: v.GetWithdrawalCredentials()[12:],
|
||||
Amount: amount,
|
||||
})
|
||||
withdrawalIndex++
|
||||
}
|
||||
processedPartialWithdrawalsCount++
|
||||
}
|
||||
withdrawalIndex, processedPartialWithdrawalsCount, err := b.appendPendingPartialWithdrawals(withdrawalIndex, &withdrawals)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
err = b.appendValidatorsSweepWithdrawals(withdrawalIndex, &withdrawals)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
return withdrawals, processedPartialWithdrawalsCount, nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) (uint64, uint64, error) {
|
||||
if b.version < version.Electra {
|
||||
return withdrawalIndex, 0, nil
|
||||
}
|
||||
|
||||
ws := *withdrawals
|
||||
epoch := slots.ToEpoch(b.slot)
|
||||
var processedPartialWithdrawalsCount uint64
|
||||
for _, w := range b.pendingPartialWithdrawals {
|
||||
if w.WithdrawableEpoch > epoch || len(ws) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
|
||||
break
|
||||
}
|
||||
|
||||
v, err := b.validatorAtIndexReadOnly(w.Index)
|
||||
if err != nil {
|
||||
return withdrawalIndex, 0, fmt.Errorf("failed to determine withdrawals at index %d: %w", w.Index, err)
|
||||
}
|
||||
vBal, err := b.balanceAtIndex(w.Index)
|
||||
if err != nil {
|
||||
return withdrawalIndex, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
|
||||
}
|
||||
hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
|
||||
var totalWithdrawn uint64
|
||||
for _, wi := range ws {
|
||||
if wi.ValidatorIndex == w.Index {
|
||||
totalWithdrawn += wi.Amount
|
||||
}
|
||||
}
|
||||
balance, err := mathutil.Sub64(vBal, totalWithdrawn)
|
||||
if err != nil {
|
||||
return withdrawalIndex, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
|
||||
}
|
||||
hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
|
||||
if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
|
||||
amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
|
||||
ws = append(ws, &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: w.Index,
|
||||
Address: v.GetWithdrawalCredentials()[12:],
|
||||
Amount: amount,
|
||||
})
|
||||
withdrawalIndex++
|
||||
}
|
||||
processedPartialWithdrawalsCount++
|
||||
}
|
||||
|
||||
*withdrawals = ws
|
||||
return withdrawalIndex, processedPartialWithdrawalsCount, nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) error {
|
||||
ws := *withdrawals
|
||||
validatorIndex := b.nextWithdrawalValidatorIndex
|
||||
validatorsLen := b.validatorsLen()
|
||||
epoch := slots.ToEpoch(b.slot)
|
||||
bound := min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
|
||||
for range bound {
|
||||
val, err := b.validatorAtIndexReadOnly(validatorIndex)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "could not retrieve validator at index %d", validatorIndex)
|
||||
return errors.Wrapf(err, "could not retrieve validator at index %d", validatorIndex)
|
||||
}
|
||||
balance, err := b.balanceAtIndex(validatorIndex)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "could not retrieve balance at index %d", validatorIndex)
|
||||
return errors.Wrapf(err, "could not retrieve balance at index %d", validatorIndex)
|
||||
}
|
||||
if b.version >= version.Electra {
|
||||
var partiallyWithdrawnBalance uint64
|
||||
for _, w := range withdrawals {
|
||||
for _, w := range ws {
|
||||
if w.ValidatorIndex == validatorIndex {
|
||||
partiallyWithdrawnBalance += w.Amount
|
||||
}
|
||||
}
|
||||
balance, err = mathutil.Sub64(balance, partiallyWithdrawnBalance)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "could not subtract balance %d with partial withdrawn balance %d", balance, partiallyWithdrawnBalance)
|
||||
return errors.Wrapf(err, "could not subtract balance %d with partial withdrawn balance %d", balance, partiallyWithdrawnBalance)
|
||||
}
|
||||
}
|
||||
if helpers.IsFullyWithdrawableValidator(val, balance, epoch, b.version) {
|
||||
withdrawals = append(withdrawals, &enginev1.Withdrawal{
|
||||
ws = append(ws, &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: validatorIndex,
|
||||
Address: bytesutil.SafeCopyBytes(val.GetWithdrawalCredentials()[ETH1AddressOffset:]),
|
||||
@@ -191,7 +214,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
|
||||
})
|
||||
withdrawalIndex++
|
||||
} else if helpers.IsPartiallyWithdrawableValidator(val, balance, epoch, b.version) {
|
||||
withdrawals = append(withdrawals, &enginev1.Withdrawal{
|
||||
ws = append(ws, &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: validatorIndex,
|
||||
Address: bytesutil.SafeCopyBytes(val.GetWithdrawalCredentials()[ETH1AddressOffset:]),
|
||||
@@ -199,7 +222,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
|
||||
})
|
||||
withdrawalIndex++
|
||||
}
|
||||
if uint64(len(withdrawals)) == params.BeaconConfig().MaxWithdrawalsPerPayload {
|
||||
if uint64(len(ws)) == params.BeaconConfig().MaxWithdrawalsPerPayload {
|
||||
break
|
||||
}
|
||||
validatorIndex += 1
|
||||
@@ -208,7 +231,8 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
|
||||
}
|
||||
}
|
||||
|
||||
return withdrawals, processedPartialWithdrawalsCount, nil
|
||||
*withdrawals = ws
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) PendingPartialWithdrawals() ([]*ethpb.PendingPartialWithdrawal, error) {
|
||||
|
||||
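The withdrawal refactor above factors the Electra pending-partial pass and the validator sweep into two helpers so future forks can recombine them. A condensed in-package sketch of that composition follows; the receiver, fields, and helper signatures are taken from the hunk, and the imports are those of the file shown above:

```go
// expectedWithdrawalsSketch shows how a fork-specific routine could reuse the two
// helpers factored out above; it mirrors the new body of ExpectedWithdrawals.
func (b *BeaconState) expectedWithdrawalsSketch() ([]*enginev1.Withdrawal, uint64, error) {
	withdrawals := make([]*enginev1.Withdrawal, 0, params.BeaconConfig().MaxWithdrawalsPerPayload)

	// Pending partial withdrawals (Electra+) go first and advance the withdrawal index.
	withdrawalIndex, processed, err := b.appendPendingPartialWithdrawals(b.nextWithdrawalIndex, &withdrawals)
	if err != nil {
		return nil, 0, err
	}
	// The regular validator sweep then appends into the same slice.
	if err := b.appendValidatorsSweepWithdrawals(withdrawalIndex, &withdrawals); err != nil {
		return nil, 0, err
	}
	return withdrawals, processed, nil
}
```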
@@ -16,7 +16,7 @@ go_library(
|
||||
"//beacon-chain/state:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
|
||||
@@ -3,7 +3,7 @@ package testing
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -31,7 +31,7 @@ func GeneratePendingDeposit(t *testing.T, key common.SecretKey, amount uint64, w
|
||||
Amount: dm.Amount,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
valid, err := blocks.IsValidDepositSignature(depositData)
|
||||
valid, err := helpers.IsValidDepositSignature(depositData)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, valid)
|
||||
return ðpb.PendingDeposit{
|
||||
|
||||
changelog/bastin_gloas-new-state-test.md (new file)
@@ -0,0 +1,3 @@
+### Ignored
+
+- Add `NewBeaconStateGloas()`.

changelog/bastin_per-package-verbosity.md (new file)
@@ -0,0 +1,3 @@
+### Added
+
+- Flag `--log.vmodule` to set per-package verbosity levels for logging.

changelog/james-prysm_blocker-for-block-root.md (new file)
@@ -0,0 +1,3 @@
+### Ignored
+
+- Optimize the /eth/v1/beacon/blocks/{block_id}/root endpoint by reusing the blocker lookup instead of duplicating the logic.

changelog/terencechain_move-deposit-to-helpers.md (new file)
@@ -0,0 +1,2 @@
+### Ignored
+- Move deposit helpers from `beacon-chain/core/blocks` to `beacon-chain/core/helpers` (refactor only).

changelog/terencechain_refactor-expected-withdrawals.md (new file)
@@ -0,0 +1,2 @@
+### Ignored
+- Refactor expected withdrawals into reusable helpers for future forks.
@@ -42,6 +42,7 @@ go_test(
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@org_uber_go_mock//gomock:go_default_library",
|
||||
],
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
runtimeDebug "runtime/debug"
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/node"
|
||||
@@ -113,6 +114,7 @@ var appFlags = []cli.Flag{
|
||||
cmd.PubsubQueueSize,
|
||||
cmd.DataDirFlag,
|
||||
cmd.VerbosityFlag,
|
||||
cmd.LogVModuleFlag,
|
||||
cmd.EnableTracingFlag,
|
||||
cmd.TracingProcessNameFlag,
|
||||
cmd.TracingEndpointFlag,
|
||||
@@ -171,13 +173,22 @@ func before(ctx *cli.Context) error {
|
||||
return errors.Wrap(err, "failed to load flags from config file")
|
||||
}
|
||||
|
||||
// determine log verbosity
|
||||
// determine default log verbosity
|
||||
verbosity := ctx.String(cmd.VerbosityFlag.Name)
|
||||
verbosityLevel, err := logrus.ParseLevel(verbosity)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse log verbosity")
|
||||
}
|
||||
logs.SetLoggingLevel(verbosityLevel)
|
||||
|
||||
// determine per package verbosity. if not set, maxLevel will be 0.
|
||||
vmoduleInput := strings.Join(ctx.StringSlice(cmd.LogVModuleFlag.Name), ",")
|
||||
vmodule, maxLevel, err := cmd.ParseVModule(vmoduleInput)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse log vmodule")
|
||||
}
|
||||
|
||||
// set the global logging level to allow for the highest verbosity requested
|
||||
logs.SetLoggingLevel(max(verbosityLevel, maxLevel))
|
||||
|
||||
format := ctx.String(cmd.LogFormat.Name)
|
||||
switch format {
|
||||
@@ -191,11 +202,13 @@ func before(ctx *cli.Context) error {
|
||||
formatter.FullTimestamp = true
|
||||
formatter.ForceFormatting = true
|
||||
formatter.ForceColors = true
|
||||
formatter.VModule = vmodule
|
||||
formatter.BaseVerbosity = verbosityLevel
|
||||
|
||||
logrus.AddHook(&logs.WriterHook{
|
||||
Formatter: formatter,
|
||||
Writer: os.Stderr,
|
||||
AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
|
||||
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
|
||||
})
|
||||
case "fluentd":
|
||||
f := joonix.NewFormatter()
|
||||
@@ -219,7 +232,7 @@ func before(ctx *cli.Context) error {
|
||||
|
||||
logFileName := ctx.String(cmd.LogFileName.Name)
|
||||
if logFileName != "" {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel, vmodule); err != nil {
|
||||
log.WithError(err).Error("Failed to configuring logging to disk.")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -200,6 +200,7 @@ var appHelpFlagGroups = []flagGroup{
|
||||
cmd.LogFileName,
|
||||
cmd.VerbosityFlag,
|
||||
flags.DisableEphemeralLogFile,
|
||||
cmd.LogVModuleFlag,
|
||||
},
|
||||
},
|
||||
{ // Feature flags.
|
||||
|
||||
@@ -86,7 +86,7 @@ func main() {
|
||||
|
||||
logFileName := ctx.String(cmd.LogFileName.Name)
|
||||
if logFileName != "" {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format, level); err != nil {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format, level, map[string]logrus.Level{}); err != nil {
|
||||
log.WithError(err).Error("Failed to configuring logging to disk.")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -36,6 +36,11 @@ var (
 		Usage: "Logging verbosity. (trace, debug, info, warn, error, fatal, panic)",
 		Value: "info",
 	}
+	// LogVModuleFlag defines per-package log levels.
+	LogVModuleFlag = &cli.StringSliceFlag{
+		Name:  "log.vmodule",
+		Usage: "Per-package log verbosity. packagePath=level entries separated by commas.",
+	}
 	// DataDirFlag defines a path on disk where Prysm databases are stored.
 	DataDirFlag = &cli.StringFlag{
 		Name: "datadir",
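The new flag works together with the existing `--verbosity` flag. Following the `before()` hook changes in this diff, the global logrus level is raised to the most verbose level requested by either flag, while the per-package map is handed to the formatter for line-by-line filtering. A small sketch of that interaction (the wrapper function is illustrative; `cmd.ParseVModule` and the `max` call come from the hunks):

```go
package example

import (
	"github.com/sirupsen/logrus"

	"github.com/OffchainLabs/prysm/v7/cmd"
)

// effectiveLevels returns the global level to install and the per-package overrides.
func effectiveLevels(verbosity, vmoduleInput string) (logrus.Level, map[string]logrus.Level, error) {
	base, err := logrus.ParseLevel(verbosity)
	if err != nil {
		return 0, nil, err
	}
	vmodule, maxLevel, err := cmd.ParseVModule(vmoduleInput)
	if err != nil {
		return 0, nil, err
	}
	// e.g. --verbosity=info --log.vmodule=beacon-chain/p2p=trace -> global trace
	return max(base, maxLevel), vmodule, nil
}
```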
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
@@ -237,3 +238,41 @@ func TestValidateNoArgs_SubcommandFlags(t *testing.T) {
|
||||
err = app.Run([]string{"command", "bar", "subComm2", "--barfoo100", "garbage", "subComm4"})
|
||||
require.ErrorContains(t, "unrecognized argument: garbage", err)
|
||||
}
|
||||
|
||||
func TestParseVModule(t *testing.T) {
|
||||
t.Run("valid", func(t *testing.T) {
|
||||
input := "beacon-chain/p2p=error, beacon-chain/light-client=trace"
|
||||
parsed, maxL, err := ParseVModule(input)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, logrus.ErrorLevel, parsed["beacon-chain/p2p"])
|
||||
require.Equal(t, logrus.TraceLevel, parsed["beacon-chain/light-client"])
|
||||
require.Equal(t, logrus.TraceLevel, maxL)
|
||||
})
|
||||
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
parsed, maxL, err := ParseVModule(" ")
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, parsed)
|
||||
require.Equal(t, logrus.PanicLevel, maxL)
|
||||
})
|
||||
|
||||
t.Run("invalid", func(t *testing.T) {
|
||||
tests := []string{
|
||||
"beacon-chain/p2p",
|
||||
"beacon-chain/p2p=",
|
||||
"beacon-chain/p2p/=error",
|
||||
"=info",
|
||||
"beacon-chain/*=info",
|
||||
"beacon-chain/p2p=meow",
|
||||
"/beacon-chain/p2p=info",
|
||||
"beacon-chain/../p2p=info",
|
||||
"beacon-chain/p2p=info,",
|
||||
"beacon-chain/p2p=info,beacon-chain/p2p=debug",
|
||||
}
|
||||
for _, input := range tests {
|
||||
_, maxL, err := ParseVModule(input)
|
||||
require.ErrorContains(t, "invalid", err)
|
||||
require.Equal(t, logrus.PanicLevel, maxL)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
@@ -102,3 +103,63 @@ func ExpandSingleEndpointIfFile(ctx *cli.Context, flag *cli.StringFlag) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseVModule parses a comma-separated list of package=level entries.
|
||||
func ParseVModule(input string) (map[string]logrus.Level, logrus.Level, error) {
|
||||
var l logrus.Level
|
||||
trimmed := strings.TrimSpace(input)
|
||||
if trimmed == "" {
|
||||
return nil, l, nil
|
||||
}
|
||||
|
||||
parts := strings.Split(trimmed, ",")
|
||||
result := make(map[string]logrus.Level, len(parts))
|
||||
for _, raw := range parts {
|
||||
entry := strings.TrimSpace(raw)
|
||||
if entry == "" {
|
||||
return nil, l, fmt.Errorf("invalid vmodule entry: empty segment")
|
||||
}
|
||||
kv := strings.Split(entry, "=")
|
||||
if len(kv) != 2 {
|
||||
return nil, l, fmt.Errorf("invalid vmodule entry %q: expected path=level", entry)
|
||||
}
|
||||
pkg := strings.TrimSpace(kv[0])
|
||||
levelText := strings.TrimSpace(kv[1])
|
||||
if pkg == "" {
|
||||
return nil, l, fmt.Errorf("invalid vmodule entry %q: empty package path", entry)
|
||||
}
|
||||
if levelText == "" {
|
||||
return nil, l, fmt.Errorf("invalid vmodule entry %q: empty level", entry)
|
||||
}
|
||||
if strings.Contains(pkg, "*") {
|
||||
return nil, l, fmt.Errorf("invalid vmodule package path %q: wildcards are not allowed", pkg)
|
||||
}
|
||||
if strings.ContainsAny(pkg, " \t\n") {
|
||||
return nil, l, fmt.Errorf("invalid vmodule package path %q: whitespace is not allowed", pkg)
|
||||
}
|
||||
if strings.HasPrefix(pkg, "/") {
|
||||
return nil, l, fmt.Errorf("invalid vmodule package path %q: leading slash is not allowed", pkg)
|
||||
}
|
||||
cleaned := path.Clean(pkg)
|
||||
if cleaned != pkg || pkg == "." || pkg == ".." {
|
||||
return nil, l, fmt.Errorf("invalid vmodule package path %q: must be an absolute package path. (trailing slash not allowed)", pkg)
|
||||
}
|
||||
if _, exists := result[pkg]; exists {
|
||||
return nil, l, fmt.Errorf("invalid vmodule package path %q: duplicate entry", pkg)
|
||||
}
|
||||
level, err := logrus.ParseLevel(levelText)
|
||||
if err != nil {
|
||||
return nil, l, fmt.Errorf("invalid vmodule level %q: must be one of panic, fatal, error, warn, info, debug, trace", levelText)
|
||||
}
|
||||
result[pkg] = level
|
||||
}
|
||||
|
||||
maxLevel := logrus.PanicLevel
|
||||
for _, lvl := range result {
|
||||
if lvl > maxLevel {
|
||||
maxLevel = lvl
|
||||
}
|
||||
}
|
||||
|
||||
return result, maxLevel, nil
|
||||
}
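A short usage sketch of ParseVModule, with inputs taken from the tests earlier in this diff: entries are exact, slash-separated package paths paired with a logrus level, while wildcards, leading slashes, trailing commas, and duplicates are rejected.

```go
package example

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/cmd"
)

func parseVModuleExamples() {
	m, maxLevel, err := cmd.ParseVModule("beacon-chain/p2p=error, beacon-chain/light-client=trace")
	// maxLevel is the most verbose level found in the map.
	fmt.Println(m["beacon-chain/p2p"], m["beacon-chain/light-client"], maxLevel, err)

	for _, bad := range []string{
		"beacon-chain/*=info",    // wildcards not allowed
		"/beacon-chain/p2p=info", // leading slash not allowed
		"beacon-chain/p2p=info,", // trailing empty segment
		"beacon-chain/p2p=info,beacon-chain/p2p=debug", // duplicate entry
	} {
		if _, _, err := cmd.ParseVModule(bad); err != nil {
			fmt.Println("rejected:", bad)
		}
	}
}
```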
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
runtimeDebug "runtime/debug"
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/cmd"
|
||||
accountcommands "github.com/OffchainLabs/prysm/v7/cmd/validator/accounts"
|
||||
@@ -95,6 +96,7 @@ var appFlags = []cli.Flag{
|
||||
cmd.MinimalConfigFlag,
|
||||
cmd.E2EConfigFlag,
|
||||
cmd.VerbosityFlag,
|
||||
cmd.LogVModuleFlag,
|
||||
cmd.DataDirFlag,
|
||||
cmd.ClearDB,
|
||||
cmd.ForceClearDB,
|
||||
@@ -150,13 +152,22 @@ func main() {
|
||||
return err
|
||||
}
|
||||
|
||||
// determine log verbosity
|
||||
// determine default log verbosity
|
||||
verbosity := ctx.String(cmd.VerbosityFlag.Name)
|
||||
verbosityLevel, err := logrus.ParseLevel(verbosity)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse log verbosity")
|
||||
}
|
||||
logs.SetLoggingLevel(verbosityLevel)
|
||||
|
||||
// determine per package verbosity. if not set, maxLevel will be 0.
|
||||
vmoduleInput := strings.Join(ctx.StringSlice(cmd.LogVModuleFlag.Name), ",")
|
||||
vmodule, maxLevel, err := cmd.ParseVModule(vmoduleInput)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse log vmodule")
|
||||
}
|
||||
|
||||
// set the global logging level to allow for the highest verbosity requested
|
||||
logs.SetLoggingLevel(max(maxLevel, verbosityLevel))
|
||||
|
||||
logFileName := ctx.String(cmd.LogFileName.Name)
|
||||
|
||||
@@ -172,11 +183,13 @@ func main() {
|
||||
formatter.FullTimestamp = true
|
||||
formatter.ForceFormatting = true
|
||||
formatter.ForceColors = true
|
||||
formatter.VModule = vmodule
|
||||
formatter.BaseVerbosity = verbosityLevel
|
||||
|
||||
logrus.AddHook(&logs.WriterHook{
|
||||
Formatter: formatter,
|
||||
Writer: os.Stderr,
|
||||
AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
|
||||
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
|
||||
})
|
||||
case "fluentd":
|
||||
f := joonix.NewFormatter()
|
||||
@@ -197,7 +210,7 @@ func main() {
|
||||
}
|
||||
|
||||
if logFileName != "" {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel, vmodule); err != nil {
|
||||
log.WithError(err).Error("Failed to configuring logging to disk.")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -76,6 +76,7 @@ var appHelpFlagGroups = []flagGroup{
|
||||
cmd.AcceptTosFlag,
|
||||
cmd.ApiTimeoutFlag,
|
||||
flags.DisableEphemeralLogFile,
|
||||
cmd.LogVModuleFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -8,12 +8,13 @@ cd "$ROOT_DIR"
|
||||
./hack/gen-logs.sh
|
||||
|
||||
# Fail if that changed anything
|
||||
if ! git diff --quiet -- ./; then
|
||||
if ! git diff --quiet -- ./ || [[ -n "$(git ls-files --others --exclude-standard -- ./)" ]]; then
|
||||
echo "ERROR: log.go files are out of date. Please run:"
|
||||
echo " ./hack/gen-logs.sh"
|
||||
echo "and commit the changes."
|
||||
echo
|
||||
git diff --stat -- ./ || true
|
||||
git status --porcelain -- ./ || true
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@ func addLogWriter(w io.Writer) {
|
||||
}
|
||||
|
||||
// ConfigurePersistentLogging adds a log-to-file writer. File content is identical to stdout.
|
||||
func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Level) error {
|
||||
func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Level, vmodule map[string]logrus.Level) error {
|
||||
logrus.WithField("logFileName", logFileName).Info("Logs will be made persistent")
|
||||
if err := file.MkdirAll(filepath.Dir(logFileName)); err != nil {
|
||||
return err
|
||||
@@ -47,6 +47,13 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
|
||||
return nil
|
||||
}
|
||||
|
||||
maxVmoduleLevel := logrus.PanicLevel
|
||||
for _, level := range vmodule {
|
||||
if level > maxVmoduleLevel {
|
||||
maxVmoduleLevel = level
|
||||
}
|
||||
}
|
||||
|
||||
// Create formatter and writer hook
|
||||
formatter := new(prefixed.TextFormatter)
|
||||
formatter.TimestampFormat = "2006-01-02 15:04:05.00"
|
||||
@@ -54,11 +61,13 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
|
||||
// If persistent log files are written - we disable the log messages coloring because
|
||||
// the colors are ANSI codes and seen as gibberish in the log files.
|
||||
formatter.DisableColors = true
|
||||
formatter.BaseVerbosity = lvl
|
||||
formatter.VModule = vmodule
|
||||
|
||||
logrus.AddHook(&WriterHook{
|
||||
Formatter: formatter,
|
||||
Writer: f,
|
||||
AllowedLevels: logrus.AllLevels[:lvl+1],
|
||||
AllowedLevels: logrus.AllLevels[:max(lvl, maxVmoduleLevel)+1],
|
||||
})
|
||||
|
||||
logrus.Info("File logging initialized")
|
||||
|
||||
@@ -28,20 +28,20 @@ func TestMaskCredentialsLogging(t *testing.T) {
}
}

func TestConfigurePersistantLogging(t *testing.T) {
func TestConfigurePersistentLogging(t *testing.T) {
testParentDir := t.TempDir()

// 1. Test creation of file in an existing parent directory
logFileName := "test.log"
existingDirectory := "test-1-existing-testing-dir"

err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text", logrus.InfoLevel)
err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
require.NoError(t, err)

// 2. Test creation of file along with parent directory
nonExistingDirectory := "test-2-non-existing-testing-dir"

err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text", logrus.InfoLevel)
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
require.NoError(t, err)

// 3. Test creation of file in an existing parent directory with a non-existing sub-directory
@@ -52,7 +52,7 @@ func TestConfigurePersistantLogging(t *testing.T) {
return
}

err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text", logrus.InfoLevel)
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
require.NoError(t, err)

//4. Create log file in a directory without 700 permissions

@@ -120,6 +120,13 @@ type TextFormatter struct {

// Timestamp format to use for display when a full timestamp is printed.
TimestampFormat string

// VModule overrides the allowed log level for exact package paths.
// When using VModule, you should also set BaseVerbosity to the default verbosity level provided by the user.
VModule map[string]logrus.Level

// BaseVerbosity is the default verbosity level for all packages.
BaseVerbosity logrus.Level
}

func getCompiledColor(main string, fallback string) func(string) string {
@@ -168,6 +175,11 @@ func (f *TextFormatter) SetColorScheme(colorScheme *ColorScheme) {
}

func (f *TextFormatter) Format(entry *logrus.Entry) ([]byte, error) {
// check for vmodule compatibility
if !f.shouldLog(entry) {
return []byte{}, nil
}

var b *bytes.Buffer
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
@@ -236,6 +248,39 @@ func (f *TextFormatter) Format(entry *logrus.Entry) ([]byte, error) {
return b.Bytes(), nil
}

func (f *TextFormatter) shouldLog(entry *logrus.Entry) bool {
if len(f.VModule) == 0 {
return true
}
packagePath, ok := entry.Data["package"]
if !ok {
return entry.Level <= f.BaseVerbosity
}
packageName, ok := packagePath.(string)
if !ok {
return entry.Level <= f.BaseVerbosity
}

packageLevel := f.bestMatchLevel(packageName)

return entry.Level <= packageLevel
}

// bestMatchLevel returns the level of the most specific path that matches package name.
func (f *TextFormatter) bestMatchLevel(pkg string) logrus.Level {
bestLen := 0
bestLevel := f.BaseVerbosity
for k, v := range f.VModule {
if k == pkg || strings.HasPrefix(pkg, k+"/") {
if len(k) > bestLen {
bestLen = len(k)
bestLevel = v
}
}
}
return bestLevel
}

func (f *TextFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry, keys []string, timestampFormat string, colorScheme *compiledColorScheme) (err error) {
var levelColor func(string) string
var levelText string

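The shouldLog/bestMatchLevel pair decides whether an entry is written: an entry passes if its level is within the level of the most specific VModule key matching its "package" field, falling back to BaseVerbosity otherwise. The standalone sketch below mirrors that selection rule so the matching semantics are easy to check; the package-path keys are hypothetical and the helper does not call the formatter itself.

package main

import (
	"fmt"
	"strings"

	"github.com/sirupsen/logrus"
)

// bestMatch mirrors bestMatchLevel above: a key applies when it equals the package
// path or is a parent path of it, and the longest (most specific) key wins.
func bestMatch(vmodule map[string]logrus.Level, base logrus.Level, pkg string) logrus.Level {
	bestLen, bestLevel := 0, base
	for k, v := range vmodule {
		if (k == pkg || strings.HasPrefix(pkg, k+"/")) && len(k) > bestLen {
			bestLen, bestLevel = len(k), v
		}
	}
	return bestLevel
}

func main() {
	vmodule := map[string]logrus.Level{ // hypothetical keys for illustration
		"beacon-chain/sync":          logrus.DebugLevel,
		"beacon-chain/sync/backfill": logrus.TraceLevel,
	}
	// Matches both keys; the longer one wins, so the effective level is trace.
	fmt.Println(bestMatch(vmodule, logrus.InfoLevel, "beacon-chain/sync/backfill/worker"))
	// Not a path child of "beacon-chain/sync" (no "/" boundary), so it falls back to info.
	fmt.Println(bestMatch(vmodule, logrus.InfoLevel, "beacon-chain/syncer"))
}
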
@@ -4230,7 +4230,7 @@
sources:
- file: beacon-chain/core/electra/deposits.go
search: func IsValidDepositSignature(
- file: beacon-chain/core/blocks/deposit.go
- file: beacon-chain/core/helpers/deposit.go
search: func IsValidDepositSignature(
spec: |
<spec fn="is_valid_deposit_signature" fork="electra" hash="7347b754">

@@ -7,7 +7,6 @@ import (

"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -75,7 +74,7 @@ func processPreGenesisDeposits(
if err != nil {
return nil, errors.Wrap(err, "could not process deposit")
}
beaconState, err = blocks.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
beaconState, err = helpers.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
if err != nil {
return nil, err
}

@@ -112,7 +112,7 @@ func NewBeaconState(options ...NewBeaconStateOption) (state.BeaconState, error)
return nil, err
}

return st.Copy(), nil
return st, nil
}

// NewBeaconStateAltair creates a beacon state with minimum marshalable fields.
@@ -167,7 +167,7 @@ func NewBeaconStateAltair(options ...func(state *ethpb.BeaconStateAltair) error)
return nil, err
}

return st.Copy(), nil
return st, nil
}

// NewBeaconStateBellatrix creates a beacon state with minimum marshalable fields.
@@ -234,7 +234,7 @@ func NewBeaconStateBellatrix(options ...func(state *ethpb.BeaconStateBellatrix)
return nil, err
}

return st.Copy(), nil
return st, nil
}

// NewBeaconStateCapella creates a beacon state with minimum marshalable fields.
@@ -302,7 +302,7 @@ func NewBeaconStateCapella(options ...func(state *ethpb.BeaconStateCapella) erro
return nil, err
}

return st.Copy(), nil
return st, nil
}

// NewBeaconStateDeneb creates a beacon state with minimum marshalable fields.
@@ -370,7 +370,7 @@ func NewBeaconStateDeneb(options ...func(state *ethpb.BeaconStateDeneb) error) (
return nil, err
}

return st.Copy(), nil
return st, nil
}

// NewBeaconStateElectra creates a beacon state with minimum marshalable fields.
@@ -438,7 +438,7 @@ func NewBeaconStateElectra(options ...func(state *ethpb.BeaconStateElectra) erro
return nil, err
}

return st.Copy(), nil
return st, nil
}

// NewBeaconStateFulu creates a beacon state with minimum marshalable fields.
@@ -507,7 +507,86 @@ func NewBeaconStateFulu(options ...func(state *ethpb.BeaconStateFulu) error) (st
return nil, err
}

return st.Copy(), nil
return st, nil
}

// NewBeaconStateGloas creates a beacon state with minimum marshalable fields.
func NewBeaconStateGloas(options ...func(state *ethpb.BeaconStateGloas) error) (state.BeaconState, error) {
pubkeys := make([][]byte, 512)
for i := range pubkeys {
pubkeys[i] = make([]byte, 48)
}

builderPendingPayments := make([]*ethpb.BuilderPendingPayment, 64)
for i := range builderPendingPayments {
builderPendingPayments[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
}
}

seed := &ethpb.BeaconStateGloas{
BlockRoots: filledByteSlice2D(uint64(params.BeaconConfig().SlotsPerHistoricalRoot), 32),
StateRoots: filledByteSlice2D(uint64(params.BeaconConfig().SlotsPerHistoricalRoot), 32),
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
RandaoMixes: filledByteSlice2D(uint64(params.BeaconConfig().EpochsPerHistoricalVector), 32),
Validators: make([]*ethpb.Validator, 0),
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, 32),
},
Fork: &ethpb.Fork{
PreviousVersion: make([]byte, 4),
CurrentVersion: make([]byte, 4),
},
Eth1DataVotes: make([]*ethpb.Eth1Data, 0),
HistoricalRoots: make([][]byte, 0),
JustificationBits: bitfield.Bitvector4{0x0},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
LatestBlockHeader: HydrateBeaconHeader(&ethpb.BeaconBlockHeader{}),
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
PreviousEpochParticipation: make([]byte, 0),
CurrentEpochParticipation: make([]byte, 0),
CurrentSyncCommittee: &ethpb.SyncCommittee{
Pubkeys: pubkeys,
AggregatePubkey: make([]byte, 48),
},
NextSyncCommittee: &ethpb.SyncCommittee{
Pubkeys: pubkeys,
AggregatePubkey: make([]byte, 48),
},
ProposerLookahead: make([]uint64, 64),
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitmentsRoot: make([]byte, 32),
},
Builders: make([]*ethpb.Builder, 0),
ExecutionPayloadAvailability: make([]byte, 1024),
BuilderPendingPayments: builderPendingPayments,
BuilderPendingWithdrawals: make([]*ethpb.BuilderPendingWithdrawal, 0),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
}

for _, opt := range options {
err := opt(seed)
if err != nil {
return nil, err
}
}

var st, err = state_native.InitializeFromProtoUnsafeGloas(seed)
if err != nil {
return nil, err
}

return st, nil
}

// SSZ will fill 2D byte slices with their respective values, so we must fill these in too for round

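Two details of these helpers are easy to miss in the hunks above: the constructors now return the initialized state directly instead of a defensive st.Copy(), and NewBeaconStateGloas accepts option functions that mutate the seed proto before state_native wraps it. The sketch below is a hedged illustration, assuming the helpers live in the testing util package, the usual ethpb proto import path, and that the Gloas proto exposes a settable Slot field like earlier forks; none of those specifics are confirmed by this diff.

package util_test

import (
	"testing"

	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1" // assumed import paths
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
)

func TestNewBeaconStateGloasSketch(t *testing.T) {
	// Option functions run against the seed proto before InitializeFromProtoUnsafeGloas wraps it.
	st, err := util.NewBeaconStateGloas(func(s *ethpb.BeaconStateGloas) error {
		s.Slot = 7 // assumption: Slot is settable on the Gloas proto as on earlier forks
		return nil
	})
	require.NoError(t, err)

	// The constructor no longer copies before returning, so take an explicit copy when a
	// test needs to mutate state without touching the reference it will compare against.
	isolated := st.Copy()
	_ = isolated // mutate the copy freely; the original st is unaffected
}
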
@@ -68,6 +68,26 @@ func TestNewBeaconStateElectra(t *testing.T) {
assert.DeepEqual(t, st.ToProtoUnsafe(), got)
}

func TestNewBeaconStateFulu(t *testing.T) {
st, err := NewBeaconStateFulu()
require.NoError(t, err)
b, err := st.MarshalSSZ()
require.NoError(t, err)
got := &ethpb.BeaconStateFulu{}
require.NoError(t, got.UnmarshalSSZ(b))
assert.DeepEqual(t, st.ToProtoUnsafe(), got)
}

func TestNewBeaconStateGloas(t *testing.T) {
st, err := NewBeaconStateGloas()
require.NoError(t, err)
b, err := st.MarshalSSZ()
require.NoError(t, err)
got := &ethpb.BeaconStateGloas{}
require.NoError(t, got.UnmarshalSSZ(b))
assert.DeepEqual(t, st.ToProtoUnsafe(), got)
}

func TestNewBeaconState_HashTreeRoot(t *testing.T) {
st, err := NewBeaconState()
require.NoError(t, err)

@@ -71,7 +71,7 @@ func main() {
flag.Parse()

if *logFileName != "" {
if err := logs.ConfigurePersistentLogging(*logFileName, "text", logrus.DebugLevel); err != nil {
if err := logs.ConfigurePersistentLogging(*logFileName, "text", logrus.DebugLevel, map[string]logrus.Level{}); err != nil {
log.WithError(err).Error("Failed to configuring logging to disk.")
}
}