Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-02-03 17:45:09 -05:00

Compare commits: proposer-d...fix-loggin (3 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 333f900c67 | |
| | 17544dafe0 | |
| | 143c777006 | |
@@ -85,6 +85,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/logs:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
@@ -10,6 +10,7 @@ import (
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/io/logs"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
prysmTime "github.com/OffchainLabs/prysm/v7/time"
@@ -89,34 +90,39 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
if err != nil {
return err
}
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
log.WithFields(lf).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
parentRoot := block.ParentRoot()

lessFields := logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}
moreFields := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}

level := logs.PackageVerbosity("beacon-chain/blockchain")
if level >= logrus.DebugLevel {
log.WithFields(moreFields).Info("Synced new block")
} else {
log.WithFields(lessFields).WithField(logs.LogTargetField, logs.LogTargetUser).Info("Synced new block")
log.WithFields(moreFields).WithField(logs.LogTargetField, logs.LogTargetEphemeral).Info("Synced new block")
}

return nil
}
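The rewritten logBlockSyncStatus emits the same "Synced new block" event twice when the package is not at debug verbosity: a compact entry tagged logs.LogTargetUser for the console, and a fully detailed entry tagged logs.LogTargetEphemeral for the debug log file, with each hook keeping only the entries that match its identifier (see the WriterHook changes further down). A minimal sketch of that tagging pattern using plain logrus; the field values here are made up for illustration:

package main

import "github.com/sirupsen/logrus"

func main() {
	// Mirrors logs.LogTargetField / logs.LogTargetUser / logs.LogTargetEphemeral from the diff.
	const logTargetField = "log_target"

	compact := logrus.Fields{"slot": 123, "epoch": 3}
	verbose := logrus.Fields{"slot": 123, "epoch": 3, "slotInEpoch": 27, "parentRoot": "0xabcd1234..."}

	// One user-facing line with few fields, one verbose line intended for the ephemeral debug file.
	logrus.WithFields(compact).WithField(logTargetField, "user").Info("Synced new block")
	logrus.WithFields(verbose).WithField(logTargetField, "ephemeral").Info("Synced new block")
}

Both lines print to stderr here because no hooks are installed; in Prysm the routing is done by WriterHook instances that filter on the log_target field.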
@@ -77,7 +77,6 @@ type ChainService struct {
DataColumns []blocks.VerifiedRODataColumn
TargetRoot [32]byte
MockHeadSlot *primitives.Slot
DependentRootCB func([32]byte, primitives.Epoch) ([32]byte, error)
}

func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -759,10 +758,7 @@ func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) err
}

// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
if c.DependentRootCB != nil {
return c.DependentRootCB(root, epoch)
}
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil
}
@@ -329,16 +329,6 @@ func (s *Service) validatorEndpoints(
handler: server.GetProposerDuties,
methods: []string{http.MethodGet},
},
{
template: "/eth/v2/validator/duties/proposer/{epoch}",
name: namespace + ".GetProposerDutiesV2",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetProposerDutiesV2,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/validator/duties/sync/{epoch}",
name: namespace + ".GetSyncCommitteeDuties",
@@ -93,7 +93,6 @@ func Test_endpoints(t *testing.T) {
validatorRoutes := map[string][]string{
"/eth/v1/validator/duties/attester/{epoch}": {http.MethodPost},
"/eth/v1/validator/duties/proposer/{epoch}": {http.MethodGet},
"/eth/v2/validator/duties/proposer/{epoch}": {http.MethodGet},
"/eth/v1/validator/duties/sync/{epoch}": {http.MethodPost},
"/eth/v3/validator/blocks/{slot}": {http.MethodGet},
"/eth/v1/validator/attestation_data": {http.MethodGet},
@@ -982,29 +982,20 @@ func (s *Server) GetAttesterDuties(w http.ResponseWriter, r *http.Request) {
httputil.WriteJson(w, response)
}

// proposerDutiesInfo holds the computed proposer duties and associated metadata.
type proposerDutiesInfo struct {
duties []*structs.ProposerDuty
isOptimistic bool
lookupEpoch primitives.Epoch // epoch used for state lookup (adjusted for lookahead)
dutiesEpoch primitives.Epoch // actual epoch duties are for
st state.BeaconState
}
// GetProposerDuties requests beacon node to provide all validators that are scheduled to propose a block in the given epoch.
func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.GetProposerDuties")
defer span.End()

// computeProposerDuties computes proposer duties for the given epoch. It handles sync checking,
// epoch parsing/validation, next-epoch lookahead, state fetch, assignment computation, duty building,
// sorting, and optimistic check. It writes errors directly to w and returns nil if an error occurred.
func (s *Server) computeProposerDuties(ctx context.Context, w http.ResponseWriter, r *http.Request) *proposerDutiesInfo {
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return nil
return
}

_, requestedEpochUint, ok := shared.UintFromRoute(w, r, "epoch")
if !ok {
return nil
return
}
requestedEpoch := primitives.Epoch(requestedEpochUint)
dutiesEpoch := requestedEpoch

cs := s.TimeFetcher.CurrentSlot()
currentEpoch := slots.ToEpoch(cs)
@@ -1016,7 +1007,7 @@ func (s *Server) computeProposerDuties(ctx context.Context, w http.ResponseWrite
fmt.Sprintf("Request epoch %d can not be greater than next epoch %d", requestedEpoch, currentEpoch+1),
http.StatusBadRequest,
)
return nil
return
} else if requestedEpoch == nextEpoch {
// If the request is for the next epoch, we use the current epoch's state to compute duties.
requestedEpoch = currentEpoch
@@ -1026,18 +1017,18 @@ func (s *Server) computeProposerDuties(ctx context.Context, w http.ResponseWrite
st, err := s.Stater.StateByEpoch(ctx, requestedEpoch)
if err != nil {
shared.WriteStateFetchError(w, err)
return nil
return
}

var assignments map[primitives.ValidatorIndex][]primitives.Slot
if nextEpochLookahead {
assignments, err = helpers.ProposerAssignments(ctx, st, dutiesEpoch)
assignments, err = helpers.ProposerAssignments(ctx, st, nextEpoch)
} else {
assignments, err = helpers.ProposerAssignments(ctx, st, requestedEpoch)
}
if err != nil {
httputil.HandleError(w, "Could not compute committee assignments: "+err.Error(), http.StatusInternalServerError)
return nil
return
}

duties := make([]*structs.ProposerDuty, 0)
@@ -1045,7 +1036,7 @@ func (s *Server) computeProposerDuties(ctx context.Context, w http.ResponseWrite
val, err := st.ValidatorAtIndexReadOnly(index)
if err != nil {
httputil.HandleError(w, fmt.Sprintf("Could not get validator at index %d: %v", index, err), http.StatusInternalServerError)
return nil
return
}
pubkey48 := val.PublicKey()
pubkey := pubkey48[:]
@@ -1058,103 +1049,35 @@ func (s *Server) computeProposerDuties(ctx context.Context, w http.ResponseWrite
}
}

if err = sortProposerDuties(duties); err != nil {
httputil.HandleError(w, "Could not sort proposer duties: "+err.Error(), http.StatusInternalServerError)
return nil
var dependentRoot []byte
if requestedEpoch == 0 {
r, err := s.BeaconDB.GenesisBlockRoot(ctx)
if err != nil {
httputil.HandleError(w, "Could not get genesis block root: "+err.Error(), http.StatusInternalServerError)
return
}
dependentRoot = r[:]
} else {
dependentRoot, err = proposalDependentRoot(st, requestedEpoch)
if err != nil {
httputil.HandleError(w, "Could not get dependent root: "+err.Error(), http.StatusInternalServerError)
return
}
}

isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
return nil
}

return &proposerDutiesInfo{
duties: duties,
isOptimistic: isOptimistic,
lookupEpoch: requestedEpoch,
dutiesEpoch: dutiesEpoch,
st: st,
}
}

// GetProposerDuties requests beacon node to provide all validators that are scheduled to propose a block in the given epoch.
func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.GetProposerDuties")
defer span.End()

info := s.computeProposerDuties(ctx, w, r)
if info == nil {
return
}

var dependentRoot []byte
if info.lookupEpoch == 0 {
root, err := s.BeaconDB.GenesisBlockRoot(ctx)
if err != nil {
httputil.HandleError(w, "Could not get genesis block root: "+err.Error(), http.StatusInternalServerError)
return
}
dependentRoot = root[:]
} else {
var err error
dependentRoot, err = proposalDependentRoot(info.st, info.lookupEpoch)
if err != nil {
httputil.HandleError(w, "Could not get dependent root: "+err.Error(), http.StatusInternalServerError)
return
}
if err = sortProposerDuties(duties); err != nil {
httputil.HandleError(w, "Could not sort proposer duties: "+err.Error(), http.StatusInternalServerError)
return
}

resp := &structs.GetProposerDutiesResponse{
DependentRoot: hexutil.Encode(dependentRoot),
Data: info.duties,
ExecutionOptimistic: info.isOptimistic,
}
httputil.WriteJson(w, resp)
}

// GetProposerDutiesV2 requests beacon node to provide all validators that are scheduled to propose a block in the given epoch.
// V2 computes a fork-aware dependent root: post-Fulu uses DependentRootForEpoch(headRoot, epoch-1) to account for
// the deterministic proposer lookahead, while pre-Fulu uses DependentRootForEpoch(headRoot, epoch) matching v1 semantics.
func (s *Server) GetProposerDutiesV2(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.GetProposerDutiesV2")
defer span.End()

info := s.computeProposerDuties(ctx, w, r)
if info == nil {
return
}

var dependentRoot []byte
if info.dutiesEpoch == 0 {
root, err := s.BeaconDB.GenesisBlockRoot(ctx)
if err != nil {
httputil.HandleError(w, "Could not get genesis block root: "+err.Error(), http.StatusInternalServerError)
return
}
dependentRoot = root[:]
} else {
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
httputil.HandleError(w, "Could not get head root: "+err.Error(), http.StatusInternalServerError)
return
}
depEpoch := info.dutiesEpoch
if depEpoch >= params.BeaconConfig().FuluForkEpoch {
depEpoch = info.dutiesEpoch - 1
}
root, err := s.HeadFetcher.DependentRootForEpoch(bytesutil.ToBytes32(headRoot), depEpoch)
if err != nil {
httputil.HandleError(w, "Could not get dependent root: "+err.Error(), http.StatusInternalServerError)
return
}
dependentRoot = root[:]
}

resp := &structs.GetProposerDutiesResponse{
DependentRoot: hexutil.Encode(dependentRoot),
Data: info.duties,
ExecutionOptimistic: info.isOptimistic,
Data: duties,
ExecutionOptimistic: isOptimistic,
}
httputil.WriteJson(w, resp)
}
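The v1 and v2 handlers differ only in where the dependent root comes from: v1 keeps proposalDependentRoot(st, lookupEpoch), while v2 asks the head fetcher for DependentRootForEpoch and, once Fulu is active, anchors it one epoch earlier because the proposer lookahead is already fixed by then. A hedged sketch of that epoch selection as a standalone helper; dependentEpochForV2 is illustrative only and not a function in this diff:

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

// dependentEpochForV2 captures the v2 rule sketched in the handler above: post-Fulu the
// dependent root is taken at dutiesEpoch-1, pre-Fulu at dutiesEpoch (v1 semantics).
// The handler treats dutiesEpoch == 0 separately (genesis block root), so the
// subtraction cannot underflow on the path this models.
func dependentEpochForV2(dutiesEpoch, fuluForkEpoch primitives.Epoch) primitives.Epoch {
	if dutiesEpoch >= fuluForkEpoch {
		return dutiesEpoch - 1
	}
	return dutiesEpoch
}

func main() {
	fmt.Println(dependentEpochForV2(5, 100)) // pre-Fulu: 5
	fmt.Println(dependentEpochForV2(5, 0))   // post-Fulu: 4
}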
@@ -2542,175 +2542,6 @@ func TestGetProposerDuties(t *testing.T) {
})
}

func TestGetProposerDutiesV2(t *testing.T) {
helpers.ClearCache()

genesis := util.NewBeaconBlock()
depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
deposits, _, err := util.DeterministicDepositsAndKeys(depChainStart)
require.NoError(t, err)
eth1Data, err := util.DeterministicEth1Data(len(deposits))
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)

db := dbutil.SetupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))

t.Run("epoch 0 returns genesis block root", func(t *testing.T) {
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
require.NoError(t, err, "Could not set up genesis state")
require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch))
chainSlot := primitives.Slot(0)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
HeadFetcher: chain,
TimeFetcher: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
BeaconDB: db,
}

request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v2/validator/duties/proposer/{epoch}", nil)
request.SetPathValue("epoch", "0")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.GetProposerDutiesV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetProposerDutiesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, hexutil.Encode(genesisRoot[:]), resp.DependentRoot)
assert.Equal(t, 31, len(resp.Data))
})
t.Run("pre-fulu uses DependentRootForEpoch with dutiesEpoch", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 100 // well beyond our test epoch
params.OverrideBeaconConfig(cfg)

bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
require.NoError(t, err, "Could not set up genesis state")
require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch))
chainSlot := primitives.Slot(0)
preFuluRoot := [32]byte{'p', 'r', 'e'}
var capturedEpoch primitives.Epoch
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
DependentRootCB: func(_ [32]byte, epoch primitives.Epoch) ([32]byte, error) {
capturedEpoch = epoch
return preFuluRoot, nil
},
}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
HeadFetcher: chain,
TimeFetcher: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
BeaconDB: db,
}

// Request epoch 1 (pre-Fulu since FuluForkEpoch=100).
// V2 pre-Fulu calls DependentRootForEpoch(headRoot, dutiesEpoch=1).
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v2/validator/duties/proposer/{epoch}", nil)
request.SetPathValue("epoch", "1")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.GetProposerDutiesV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetProposerDutiesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, hexutil.Encode(preFuluRoot[:]), resp.DependentRoot)
assert.Equal(t, primitives.Epoch(1), capturedEpoch, "pre-Fulu should pass dutiesEpoch to DependentRootForEpoch")
assert.Equal(t, 32, len(resp.Data))
})
t.Run("post-fulu uses DependentRootForEpoch with dutiesEpoch-1", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 0 // Fulu active from genesis
params.OverrideBeaconConfig(cfg)

bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
require.NoError(t, err, "Could not set up genesis state")
require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch))
chainSlot := primitives.Slot(0)
postFuluRoot := [32]byte{'p', 'o', 's', 't'}
var capturedEpoch primitives.Epoch
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
DependentRootCB: func(_ [32]byte, epoch primitives.Epoch) ([32]byte, error) {
capturedEpoch = epoch
return postFuluRoot, nil
},
}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
HeadFetcher: chain,
TimeFetcher: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
BeaconDB: db,
}

// Request epoch 1 (post-Fulu since FuluForkEpoch=0).
// V2 post-Fulu calls DependentRootForEpoch(headRoot, dutiesEpoch-1=0).
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v2/validator/duties/proposer/{epoch}", nil)
request.SetPathValue("epoch", "1")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.GetProposerDutiesV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetProposerDutiesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, hexutil.Encode(postFuluRoot[:]), resp.DependentRoot)
assert.Equal(t, primitives.Epoch(0), capturedEpoch, "post-Fulu should pass dutiesEpoch-1 to DependentRootForEpoch")
assert.Equal(t, 32, len(resp.Data))
})
t.Run("next epoch lookahead", func(t *testing.T) {
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
require.NoError(t, err, "Could not set up genesis state")
require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch))
chainSlot := primitives.Slot(0)
targetRoot := [32]byte{'n', 'e', 'x', 't'}
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot, TargetRoot: targetRoot,
}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
HeadFetcher: chain,
TimeFetcher: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
BeaconDB: db,
}

request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v2/validator/duties/proposer/{epoch}", nil)
request.SetPathValue("epoch", "1")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.GetProposerDutiesV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetProposerDutiesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, 32, len(resp.Data))
})
}

func TestGetSyncCommitteeDuties(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
changelog/bastin_fix-logging-issue.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Fixed the logging issue described in #16314.
@@ -1,3 +0,0 @@
### Added

- /eth/v2/validator/duties/proposer/{epoch} implementation with updated dependent root info.
@@ -188,8 +188,8 @@ func before(ctx *cli.Context) error {
return errors.Wrap(err, "failed to parse log vmodule")
}

// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(verbosityLevel, maxLevel))
// set the global logging level and data
logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))

format := ctx.String(cmd.LogFormat.Name)
switch format {
@@ -210,6 +210,7 @@ func before(ctx *cli.Context) error {
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
Identifier: logs.LogTargetUser,
})
case "fluentd":
f := joonix.NewFormatter()
@@ -164,8 +164,8 @@ func main() {
return errors.Wrap(err, "failed to parse log vmodule")
}

// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(maxLevel, verbosityLevel))
// set the global logging level and data
logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))

logFileName := ctx.String(cmd.LogFileName.Name)

@@ -188,6 +188,7 @@ func main() {
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
Identifier: logs.LogTargetUser,
})
case "fluentd":
f := joonix.NewFormatter()
@@ -1,6 +1,7 @@
package logs

import (
"fmt"
"io"

"github.com/sirupsen/logrus"
@@ -10,6 +11,7 @@ type WriterHook struct {
AllowedLevels []logrus.Level
Writer io.Writer
Formatter logrus.Formatter
Identifier string
}

func (hook *WriterHook) Levels() []logrus.Level {
@@ -20,6 +22,11 @@ func (hook *WriterHook) Levels() []logrus.Level {
}

func (hook *WriterHook) Fire(entry *logrus.Entry) error {
val, ok := entry.Data[LogTargetField]
if ok && fmt.Sprint(val) != hook.Identifier {
return nil
}

line, err := hook.Formatter.Format(entry)
if err != nil {
return err
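With the new Identifier field, Fire drops any entry whose log_target field names a different sink, so a single logger can feed both a clean user-facing stream and a verbose ephemeral file. A minimal sketch of wiring two such hooks, assuming WriterHook is registered directly with logrus.AddHook (as the CLI setup above suggests) and that untagged entries pass every hook:

package main

import (
	"io"
	"os"

	"github.com/OffchainLabs/prysm/v7/io/logs"
	"github.com/sirupsen/logrus"
)

func main() {
	// Silence the default writer; the hooks below decide where entries go.
	logrus.SetOutput(io.Discard)
	logrus.SetLevel(logrus.DebugLevel)

	// Console hook: passes entries tagged for the user target (and untagged entries).
	logrus.AddHook(&logs.WriterHook{
		Writer:        os.Stderr,
		Formatter:     &logrus.TextFormatter{},
		AllowedLevels: logrus.AllLevels[:logrus.InfoLevel+1],
		Identifier:    logs.LogTargetUser,
	})

	// Debug hook: stands in for the ephemeral log file, here just another writer.
	logrus.AddHook(&logs.WriterHook{
		Writer:        os.Stdout,
		Formatter:     &logrus.JSONFormatter{},
		AllowedLevels: logrus.AllLevels[:logrus.DebugLevel+1],
		Identifier:    logs.LogTargetEphemeral,
	})

	logrus.WithField(logs.LogTargetField, logs.LogTargetUser).Info("compact line for the console")
	logrus.WithField(logs.LogTargetField, logs.LogTargetEphemeral).Info("verbose line for the debug file")
}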
@@ -17,11 +17,43 @@ import (
"gopkg.in/natefinch/lumberjack.v2"
)

var ephemeralLogFileVerbosity = logrus.DebugLevel
var (
userVerbosity = logrus.InfoLevel
vmodule = make(map[string]logrus.Level)
)

// SetLoggingLevel sets the base logging level for logrus.
func SetLoggingLevel(lvl logrus.Level) {
logrus.SetLevel(max(lvl, ephemeralLogFileVerbosity))
const (
ephemeralLogFileVerbosity = logrus.DebugLevel
LogTargetField = "log_target"
LogTargetEphemeral = "ephemeral"
LogTargetUser = "user"
)

// SetLoggingLevelAndData sets the base logging level for logrus.
func SetLoggingLevelAndData(baseVerbosity logrus.Level, vmoduleMap map[string]logrus.Level, maxVmoduleLevel logrus.Level, disableEphemeral bool) {
userVerbosity = baseVerbosity
vmodule = vmoduleMap

globalLevel := max(baseVerbosity, maxVmoduleLevel)
if !disableEphemeral {
globalLevel = max(globalLevel, ephemeralLogFileVerbosity)
}
logrus.SetLevel(globalLevel)
}

// PackageVerbosity returns the verbosity of a given package.
func PackageVerbosity(packagePath string) logrus.Level {
bestLen := 0
bestLevel := userVerbosity
for k, v := range vmodule {
if k == packagePath || strings.HasPrefix(packagePath, k+"/") {
if len(k) > bestLen {
bestLen = len(k)
bestLevel = v
}
}
}
return bestLevel
}

func addLogWriter(w io.Writer) {
@@ -68,6 +100,7 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
Formatter: formatter,
Writer: f,
AllowedLevels: logrus.AllLevels[:max(lvl, maxVmoduleLevel)+1],
Identifier: LogTargetUser,
})

logrus.Debug("File logging initialized")
@@ -101,6 +134,7 @@ func ConfigureEphemeralLogFile(datadirPath string, app string) error {
Formatter: formatter,
Writer: debugWriter,
AllowedLevels: logrus.AllLevels[:ephemeralLogFileVerbosity+1],
Identifier: LogTargetEphemeral,
})

logrus.WithField("path", logFilePath).Debug("Ephemeral log file initialized")
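SetLoggingLevelAndData stores the per-package vmodule map and the user verbosity, then raises the global logrus level to the most verbose sink that needs it (including the ephemeral debug file unless disabled); PackageVerbosity later resolves a package path by the longest matching vmodule prefix and falls back to the user verbosity. A short usage sketch of the exported functions above; the vmodule paths are example values:

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/io/logs"
	"github.com/sirupsen/logrus"
)

func main() {
	// Example vmodule map, as would be parsed from a vmodule-style flag.
	vmodule := map[string]logrus.Level{
		"beacon-chain":            logrus.InfoLevel,
		"beacon-chain/blockchain": logrus.DebugLevel,
	}

	// Base (user) verbosity info, highest vmodule level debug, ephemeral file enabled:
	// the global logrus level becomes debug, but per-package checks still go through vmodule.
	logs.SetLoggingLevelAndData(logrus.InfoLevel, vmodule, logrus.DebugLevel, false)

	// Longest matching prefix wins; unmatched packages fall back to the user verbosity.
	fmt.Println(logs.PackageVerbosity("beacon-chain/blockchain/metrics")) // debug
	fmt.Println(logs.PackageVerbosity("beacon-chain/sync"))               // info
	fmt.Println(logs.PackageVerbosity("validator/client"))                // info
}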
@@ -334,7 +334,7 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry, keys
_, err = fmt.Fprintf(b, "%s %s%s "+messageFormat, colorScheme.TimestampColor(timestamp), level, prefix, message)
}
for _, k := range keys {
if k != "package" {
if k != "package" && k != "log_target" {
v := entry.Data[k]

format := "%+v"