Compare commits

3 Commits

Author  SHA1        Message                       Date
Bastin  333f900c67  changelog                     2026-02-03 17:20:10 +01:00
Bastin  17544dafe0  fix logStateTransitionData()  2026-02-03 16:48:22 +01:00
Bastin  143c777006  hook filtering & save data    2026-02-03 16:47:53 +01:00
16 changed files with 90 additions and 164 deletions

View File

@@ -540,12 +540,6 @@ type PayloadAttestation struct {
Signature string `json:"signature"`
}
type PayloadAttestationMessage struct {
ValidatorIndex string `json:"validator_index"`
Data *PayloadAttestationData `json:"data"`
Signature string `json:"signature"`
}
type BeaconBlockBodyGloas struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`

View File

@@ -2971,14 +2971,6 @@ func PayloadAttestationDataFromConsensus(d *eth.PayloadAttestationData) *Payload
}
}
func PayloadAttestationMessageFromConsensus(m *eth.PayloadAttestationMessage) *PayloadAttestationMessage {
return &PayloadAttestationMessage{
ValidatorIndex: fmt.Sprintf("%d", m.ValidatorIndex),
Data: PayloadAttestationDataFromConsensus(m.Data),
Signature: hexutil.Encode(m.Signature),
}
}
func (b *SignedBeaconBlockGloas) ToConsensus() (*eth.SignedBeaconBlockGloas, error) {
if b == nil {
return nil, errNilValue

View File

@@ -112,8 +112,3 @@ type LightClientOptimisticUpdateEvent struct {
Version string `json:"version"`
Data *LightClientOptimisticUpdate `json:"data"`
}
type ExecutionPayloadAvailableEvent struct {
Slot string `json:"slot"`
BlockRoot string `json:"block_root"`
}

View File

@@ -85,6 +85,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/logs:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",

View File

@@ -10,6 +10,7 @@ import (
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/io/logs"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
prysmTime "github.com/OffchainLabs/prysm/v7/time"
@@ -89,34 +90,39 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
if err != nil {
return err
}
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
log.WithFields(lf).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
parentRoot := block.ParentRoot()
lessFields := logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}
moreFields := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
level := logs.PackageVerbosity("beacon-chain/blockchain")
if level >= logrus.DebugLevel {
log.WithFields(moreFields).Info("Synced new block")
} else {
log.WithFields(lessFields).WithField(logs.LogTargetField, logs.LogTargetUser).Info("Synced new block")
log.WithFields(moreFields).WithField(logs.LogTargetField, logs.LogTargetEphemeral).Info("Synced new block")
}
return nil
}
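
In the non-debug branch above, the same "Synced new block" event is emitted twice with different log_target values, and each registered hook keeps only the entries addressed to it (see the WriterHook.Fire change further down). A minimal, self-contained sketch of that routing pattern, using a hypothetical targetHook in place of Prysm's actual WriterHook:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/sirupsen/logrus"
)

// targetHook is a stand-in for Prysm's WriterHook: it drops entries whose
// "log_target" field is set but does not match its own identifier.
type targetHook struct {
	id string
}

func (h *targetHook) Levels() []logrus.Level { return logrus.AllLevels }

func (h *targetHook) Fire(e *logrus.Entry) error {
	if v, ok := e.Data["log_target"]; ok && fmt.Sprint(v) != h.id {
		return nil // entry addressed to a different sink
	}
	fmt.Fprintf(os.Stdout, "[%s] %s\n", h.id, e.Message)
	return nil
}

func main() {
	log := logrus.New()
	log.SetOutput(io.Discard) // only the hooks write anything
	log.AddHook(&targetHook{id: "user"})
	log.AddHook(&targetHook{id: "ephemeral"})

	// The non-debug branch emits the event twice, once per target;
	// each hook passes through exactly one of the two entries.
	log.WithField("log_target", "user").Info("Synced new block")
	log.WithField("log_target", "ephemeral").Info("Synced new block")
}

Running the sketch prints one "[user]" line and one "[ephemeral]" line, mirroring how the console hook and the ephemeral file hook each end up with a single copy of the event.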

View File

@@ -46,14 +46,6 @@ const (
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
DataColumnReceived = 12
// ExecutionPayloadBidReceived is sent after a signed execution payload bid is received from gossip or API
// that passes gossip validation on the execution_payload_bid topic.
ExecutionPayloadBidReceived = 13
// PayloadAttestationMessageReceived is sent after a payload attestation message is received
// that passes validation rules of the payload_attestation_message topic.
PayloadAttestationMessageReceived = 14
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -122,13 +114,3 @@ type DataColumnReceivedData struct {
BlockRoot [32]byte
KzgCommitments [][]byte
}
// ExecutionPayloadBidReceivedData is the data sent with ExecutionPayloadBidReceived events.
type ExecutionPayloadBidReceivedData struct {
SignedBid *ethpb.SignedExecutionPayloadBid
}
// PayloadAttestationMessageReceivedData is the data sent with PayloadAttestationMessageReceived events.
type PayloadAttestationMessageReceivedData struct {
PayloadAttestationMessage *ethpb.PayloadAttestationMessage
}

View File

@@ -33,9 +33,6 @@ const (
LightClientOptimisticUpdate
// PayloadAttributes events are fired upon a missed slot or new head.
PayloadAttributes
// ExecutionPayloadAvailable is sent when the node has verified that the execution payload
// and blobs for a block are available and ready for payload attestation.
ExecutionPayloadAvailable
)
// BlockProcessedData is the data sent with BlockProcessed events.
@@ -75,11 +72,3 @@ type InitializedData struct {
// GenesisValidatorsRoot represents state.validators.HashTreeRoot().
GenesisValidatorsRoot []byte
}
// ExecutionPayloadAvailableData is the data sent with ExecutionPayloadAvailable events.
type ExecutionPayloadAvailableData struct {
// Slot is the slot of the block whose execution payload became available.
Slot primitives.Slot
// BlockRoot is the root of the block whose execution payload became available.
BlockRoot [32]byte
}

View File

@@ -74,12 +74,6 @@ const (
LightClientOptimisticUpdateTopic = "light_client_optimistic_update"
// DataColumnTopic represents a data column sidecar event topic
DataColumnTopic = "data_column_sidecar"
// ExecutionPayloadAvailableTopic represents an event indicating execution payload and blobs are available.
ExecutionPayloadAvailableTopic = "execution_payload_available"
// ExecutionPayloadBidTopic represents an event for a signed execution payload bid passing gossip validation.
ExecutionPayloadBidTopic = "execution_payload_bid"
// PayloadAttestationMessageTopic represents an event for a payload attestation message passing validation.
PayloadAttestationMessageTopic = "payload_attestation_message"
)
var (
@@ -114,8 +108,6 @@ var opsFeedEventTopics = map[feed.EventType]string{
operation.ProposerSlashingReceived: ProposerSlashingTopic,
operation.BlockGossipReceived: BlockGossipTopic,
operation.DataColumnReceived: DataColumnTopic,
operation.ExecutionPayloadBidReceived: ExecutionPayloadBidTopic,
operation.PayloadAttestationMessageReceived: PayloadAttestationMessageTopic,
}
var stateFeedEventTopics = map[feed.EventType]string{
@@ -126,7 +118,6 @@ var stateFeedEventTopics = map[feed.EventType]string{
statefeed.Reorg: ChainReorgTopic,
statefeed.BlockProcessed: BlockTopic,
statefeed.PayloadAttributes: PayloadAttributesTopic,
statefeed.ExecutionPayloadAvailable: ExecutionPayloadAvailableTopic,
}
var topicsForStateFeed = topicsForFeed(stateFeedEventTopics)
@@ -475,12 +466,6 @@ func topicForEvent(event *feed.Event) string {
return PayloadAttributesTopic
case *operation.DataColumnReceivedData:
return DataColumnTopic
case *operation.ExecutionPayloadBidReceivedData:
return ExecutionPayloadBidTopic
case *operation.PayloadAttestationMessageReceivedData:
return PayloadAttestationMessageTopic
case *statefeed.ExecutionPayloadAvailableData:
return ExecutionPayloadAvailableTopic
default:
return InvalidTopic
}
@@ -653,21 +638,6 @@ func (s *Server) lazyReaderForEvent(ctx context.Context, event *feed.Event, topi
}
return jsonMarshalReader(eventName, blk)
}, nil
case *statefeed.ExecutionPayloadAvailableData:
return func() io.Reader {
return jsonMarshalReader(eventName, &structs.ExecutionPayloadAvailableEvent{
Slot: fmt.Sprintf("%d", v.Slot),
BlockRoot: hexutil.Encode(v.BlockRoot[:]),
})
}, nil
case *operation.ExecutionPayloadBidReceivedData:
return func() io.Reader {
return jsonMarshalReader(eventName, structs.SignedExecutionPayloadBidFromConsensus(v.SignedBid))
}, nil
case *operation.PayloadAttestationMessageReceivedData:
return func() io.Reader {
return jsonMarshalReader(eventName, structs.PayloadAttestationMessageFromConsensus(v.PayloadAttestationMessage))
}, nil
default:
return nil, errors.Wrapf(errUnhandledEventData, "event data type %T unsupported", v)
}

View File

@@ -123,8 +123,6 @@ func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
ProposerSlashingTopic,
BlockGossipTopic,
DataColumnTopic,
ExecutionPayloadBidTopic,
PayloadAttestationMessageTopic,
})
require.NoError(t, err)
ro, err := blocks.NewROBlob(util.HydrateBlobSidecar(&eth.BlobSidecar{}))
@@ -314,42 +312,6 @@ func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
KzgCommitments: [][]byte{{'a'}, {'b'}, {'c'}},
},
},
{
Type: operation.ExecutionPayloadBidReceived,
Data: &operation.ExecutionPayloadBidReceivedData{
SignedBid: &eth.SignedExecutionPayloadBid{
Message: &eth.ExecutionPayloadBid{
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
GasLimit: 30000000,
BuilderIndex: 42,
Slot: 10,
Value: 1000000000,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
},
},
},
{
Type: operation.PayloadAttestationMessageReceived,
Data: &operation.PayloadAttestationMessageReceivedData{
PayloadAttestationMessage: &eth.PayloadAttestationMessage{
ValidatorIndex: 123,
Data: &eth.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 10,
PayloadPresent: true,
BlobDataAvailable: true,
},
Signature: make([]byte, 96),
},
},
},
}
}
@@ -431,7 +393,6 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
FinalizedCheckpointTopic,
ChainReorgTopic,
BlockTopic,
ExecutionPayloadAvailableTopic,
})
require.NoError(t, err)
request := topics.testHttpRequest(testSync.ctx, t)
@@ -484,13 +445,6 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
ExecutionOptimistic: false,
},
},
{
Type: statefeed.ExecutionPayloadAvailable,
Data: &statefeed.ExecutionPayloadAvailableData{
Slot: 10,
BlockRoot: [32]byte{0x9a},
},
},
}
go func() {
@@ -767,7 +721,7 @@ func TestStuckReaderScenarios(t *testing.T) {
func wedgedWriterTestCase(t *testing.T, queueDepth func([]*feed.Event) int) {
topics, events := operationEventsFixtures(t)
require.Equal(t, 14, len(events))
require.Equal(t, 12, len(events))
// set eventFeedDepth to a number lower than the events we intend to send to force the server to drop the reader.
stn := mockChain.NewEventFeedWrapper()

View File

@@ -0,0 +1,3 @@
### Changed
- Fixed the logging issue described in #16314.

View File

@@ -1,3 +0,0 @@
### Added
- the following events available at gloas `execution_payload_available`, `execution_payload_bid`,and `payload_attestation_message`

View File

@@ -188,8 +188,8 @@ func before(ctx *cli.Context) error {
return errors.Wrap(err, "failed to parse log vmodule")
}
// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(verbosityLevel, maxLevel))
// set the global logging level and record the per-package (vmodule) verbosity data
logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
format := ctx.String(cmd.LogFormat.Name)
switch format {
@@ -210,6 +210,7 @@ func before(ctx *cli.Context) error {
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
Identifier: logs.LogTargetUser,
})
case "fluentd":
f := joonix.NewFormatter()

View File

@@ -164,8 +164,8 @@ func main() {
return errors.Wrap(err, "failed to parse log vmodule")
}
// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(maxLevel, verbosityLevel))
// set the global logging level and record the per-package (vmodule) verbosity data
logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
logFileName := ctx.String(cmd.LogFileName.Name)
@@ -188,6 +188,7 @@ func main() {
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
Identifier: logs.LogTargetUser,
})
case "fluentd":
f := joonix.NewFormatter()

View File

@@ -1,6 +1,7 @@
package logs
import (
"fmt"
"io"
"github.com/sirupsen/logrus"
@@ -10,6 +11,7 @@ type WriterHook struct {
AllowedLevels []logrus.Level
Writer io.Writer
Formatter logrus.Formatter
Identifier string
}
func (hook *WriterHook) Levels() []logrus.Level {
@@ -20,6 +22,11 @@ func (hook *WriterHook) Levels() []logrus.Level {
}
func (hook *WriterHook) Fire(entry *logrus.Entry) error {
val, ok := entry.Data[LogTargetField]
if ok && fmt.Sprint(val) != hook.Identifier {
return nil
}
line, err := hook.Formatter.Format(entry)
if err != nil {
return err
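
Because of the ok check, entries that never set a log_target field still reach every hook, so existing call sites are unaffected; only explicitly tagged entries are routed to a single sink. The rule restated as a tiny standalone predicate (shouldEmit and the mirrored constant are illustrative, not part of the diff):

package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

const logTargetField = "log_target" // mirrors logs.LogTargetField

// shouldEmit reports whether a hook with the given identifier should write an
// entry: untagged entries always pass, tagged entries only on an exact match.
func shouldEmit(data logrus.Fields, identifier string) bool {
	v, ok := data[logTargetField]
	return !ok || fmt.Sprint(v) == identifier
}

func main() {
	fmt.Println(shouldEmit(logrus.Fields{}, "user"))                            // true: untagged
	fmt.Println(shouldEmit(logrus.Fields{logTargetField: "user"}, "user"))      // true: match
	fmt.Println(shouldEmit(logrus.Fields{logTargetField: "ephemeral"}, "user")) // false: other sink
}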

View File

@@ -17,11 +17,43 @@ import (
"gopkg.in/natefinch/lumberjack.v2"
)
var ephemeralLogFileVerbosity = logrus.DebugLevel
var (
userVerbosity = logrus.InfoLevel
vmodule = make(map[string]logrus.Level)
)
// SetLoggingLevel sets the base logging level for logrus.
func SetLoggingLevel(lvl logrus.Level) {
logrus.SetLevel(max(lvl, ephemeralLogFileVerbosity))
const (
ephemeralLogFileVerbosity = logrus.DebugLevel
LogTargetField = "log_target"
LogTargetEphemeral = "ephemeral"
LogTargetUser = "user"
)
// SetLoggingLevelAndData sets the base logging level for logrus and records the per-package
// (vmodule) verbosity data. Unless the ephemeral log file is disabled, its debug verbosity
// is folded into the resulting global level.
func SetLoggingLevelAndData(baseVerbosity logrus.Level, vmoduleMap map[string]logrus.Level, maxVmoduleLevel logrus.Level, disableEphemeral bool) {
userVerbosity = baseVerbosity
vmodule = vmoduleMap
globalLevel := max(baseVerbosity, maxVmoduleLevel)
if !disableEphemeral {
globalLevel = max(globalLevel, ephemeralLogFileVerbosity)
}
logrus.SetLevel(globalLevel)
}
// PackageVerbosity returns the effective verbosity for the given package path: the longest
// matching vmodule entry (exact match or path prefix) wins, falling back to the user verbosity.
func PackageVerbosity(packagePath string) logrus.Level {
bestLen := 0
bestLevel := userVerbosity
for k, v := range vmodule {
if k == packagePath || strings.HasPrefix(packagePath, k+"/") {
if len(k) > bestLen {
bestLen = len(k)
bestLevel = v
}
}
}
return bestLevel
}
func addLogWriter(w io.Writer) {
@@ -68,6 +100,7 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
Formatter: formatter,
Writer: f,
AllowedLevels: logrus.AllLevels[:max(lvl, maxVmoduleLevel)+1],
Identifier: LogTargetUser,
})
logrus.Debug("File logging initialized")
@@ -101,6 +134,7 @@ func ConfigureEphemeralLogFile(datadirPath string, app string) error {
Formatter: formatter,
Writer: debugWriter,
AllowedLevels: logrus.AllLevels[:ephemeralLogFileVerbosity+1],
Identifier: LogTargetEphemeral,
})
logrus.WithField("path", logFilePath).Debug("Ephemeral log file initialized")
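
PackageVerbosity resolves a package's level by taking the longest vmodule entry that is an exact match or a path prefix of the queried package, and falls back to the user verbosity otherwise. A hedged re-implementation of that matching rule with made-up vmodule contents, just to show how overlaps and fallbacks resolve:

package main

import (
	"fmt"
	"strings"

	"github.com/sirupsen/logrus"
)

// packageVerbosity mirrors the lookup above: longest matching prefix wins.
func packageVerbosity(vmodule map[string]logrus.Level, fallback logrus.Level, packagePath string) logrus.Level {
	bestLen, bestLevel := 0, fallback
	for k, v := range vmodule {
		if k == packagePath || strings.HasPrefix(packagePath, k+"/") {
			if len(k) > bestLen {
				bestLen, bestLevel = len(k), v
			}
		}
	}
	return bestLevel
}

func main() {
	vmodule := map[string]logrus.Level{
		"beacon-chain":            logrus.InfoLevel,
		"beacon-chain/blockchain": logrus.DebugLevel,
	}
	// Longest prefix wins: the blockchain package resolves to debug, its
	// siblings stay at info, and unrelated packages use the fallback level.
	fmt.Println(packageVerbosity(vmodule, logrus.WarnLevel, "beacon-chain/blockchain")) // debug
	fmt.Println(packageVerbosity(vmodule, logrus.WarnLevel, "beacon-chain/sync"))       // info
	fmt.Println(packageVerbosity(vmodule, logrus.WarnLevel, "validator/client"))        // warning
}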

View File

@@ -334,7 +334,7 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry, keys
_, err = fmt.Fprintf(b, "%s %s%s "+messageFormat, colorScheme.TimestampColor(timestamp), level, prefix, message)
}
for _, k := range keys {
if k != "package" {
if k != "package" && k != "log_target" {
v := entry.Data[k]
format := "%+v"