mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-09 13:28:01 -05:00
#### This PR sets the foundation for the new logging features. --- The goal of this big PR is the following: 1. Adding a log.go file to every package: [_commit_](54f6396d4c) - Writing a bash script that adds the log.go file to every package that imports logrus, except the excluded packages, configured at the top of the bash script. - the log.go file creates a log variable and sets a field called `package` to the full path of that package. - I have tried to fix every error/problem that came from mass generation of this file. (duplicate declarations, different prefix names, etc...) - some packages had the log.go file from before, and had some helper functions in there as well. I've moved all of them to a `log_helpers.go` file within each package. 2. Create a CI rule which verifies that: [_commit_](b799c3a0ef) - every package which imports logrus, also has a log.go file, except the excluded packages. - the `package` field of each log.go variable, has the correct path. (to detect when we move a package or change its name) - I pushed a commit with a manually changed log.go file to trigger the CI check failure and it worked. 3. Alter the logging system to read the prefix from this `package` field for every log while outputting: [_commit_](b0c7f1146c) - some packages have/want/need a different log prefix than their package name (like `kv`). This can be solved by keeping a map of package paths to prefix names somewhere. --- **Some notes:** - Please review everything carefully. - I created the `prefixReplacement` map and populated the data that I deemed necessary. Please check it and complain if something doesn't make sense or is missing. I attached at the bottom, the list of all the packages that used to use a different name than their package name as their prefix. - I have chosen to mark some packages to be excluded from this whole process. They will either not log anything, or log without a prefix, or log using their previously defined prefix. 
See the list of exclusions in the bottom. - I fixed all the tests that failed because of this change. These were failing because they were expecting the old prefix to be in the generated logs. I have changed those to expect the new `package` field instead. This might not be a great solution. Ideally we might want to remove this from the tests so they only test for relevant fields in the logs. But this is a problem for another day. - Please run the node with this config, and mention if you see something weird in the logs. (use different verbosities) - The CI workflow uses a script that basically runs the `hack/gen-logs.sh` and checks that the git diff is zero. That script is `hack/check-logs.sh`. This means that if one runs this script locally, it will not actually _check_ anything, but will instead just regenerate the log.go files and fix any mistake. This might be confusing. Please suggest solutions if you think it's a problem. --- **A list of packages that used a different prefix than their package names for their logs:** - beacon-chain/cache/depositsnapshot/ package depositsnapshot, prefix "cache" - beacon-chain/core/transition/log.go — package transition, prefix "state" - beacon-chain/db/kv/log.go — package kv, prefix "db" - beacon-chain/db/slasherkv/log.go — package slasherkv, prefix "slasherdb" - beacon-chain/db/pruner/pruner.go — package pruner, prefix "db-pruner" - beacon-chain/light-client/log.go — package light_client, prefix "light-client" - beacon-chain/operations/attestations/log.go — package attestations, prefix "pool/attestations" - beacon-chain/operations/slashings/log.go — package slashings, prefix "pool/slashings" - beacon-chain/rpc/core/log.go — package core, prefix "rpc/core" - beacon-chain/rpc/eth/beacon/log.go — package beacon, prefix "rpc/beaconv1" - beacon-chain/rpc/eth/validator/log.go — package validator, prefix "beacon-api" - beacon-chain/rpc/prysm/v1alpha1/beacon/log.go — package beacon, prefix "rpc" - 
beacon-chain/rpc/prysm/v1alpha1/validator/log.go — package validator, prefix "rpc/validator" - beacon-chain/state/stategen/log.go — package stategen, prefix "state-gen" - beacon-chain/sync/checkpoint/log.go — package checkpoint, prefix "checkpoint-sync" - beacon-chain/sync/initial-sync/log.go — package initialsync, prefix "initial-sync" - cmd/prysmctl/p2p/log.go — package p2p, prefix "prysmctl-p2p" - config/features/log.go -- package features, prefix "flags" - io/file/log.go — package file, prefix "fileutil" - proto/prysm/v1alpha1/log.go — package eth, prefix "protobuf" - validator/client/beacon-api/log.go — package beacon_api, prefix "beacon-api" - validator/db/kv/log.go — package kv, prefix "db" - validator/db/filesystem/db.go — package filesystem, prefix "db" - validator/keymanager/derived/log.go — package derived, prefix "derived-keymanager" - validator/keymanager/local/log.go — package local, prefix "local-keymanager" - validator/keymanager/remote-web3signer/log.go — package remote_web3signer, prefix "remote-keymanager" - validator/keymanager/remote-web3signer/internal/log.go — package internal, prefix "remote-web3signer- internal" - beacon-chain/forkchoice/doubly... prefix is "forkchoice-doublylinkedtree" **List of excluded directories (their subdirectories are also excluded):** ``` EXCLUDED_PATH_PREFIXES=( "testing" "validator/client/testutil" "beacon-chain/p2p/testing" "beacon-chain/rpc/eth/config" "beacon-chain/rpc/prysm/v1alpha1/debug" "tools" "runtime" "monitoring" "io" "cmd" ".well-known" "changelog" "hack" "specrefs" "third_party" "bazel-out" "bazel-bin" "bazel-prysm" "bazel-testlogs" "build" ".github" ".jj" ".idea" ".vscode" ) ```
335 lines
9.5 KiB
Go
335 lines
9.5 KiB
Go
package kv
|
|
|
|
import (
|
|
"context"
|
|
"runtime"
|
|
"sync"
|
|
|
|
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
|
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
|
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
|
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
|
"github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1/attestation"
|
|
attaggregation "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1/attestation/aggregation/attestations"
|
|
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
|
"github.com/pkg/errors"
|
|
)
|
|
|
|
// AggregateUnaggregatedAttestations aggregates the unaggregated attestations and saves the
|
|
// newly aggregated attestations in the pool.
|
|
// It tracks the unaggregated attestations that weren't able to aggregate to prevent
|
|
// the deletion of unaggregated attestations in the pool.
|
|
func (c *AttCaches) AggregateUnaggregatedAttestations(ctx context.Context) error {
|
|
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregateUnaggregatedAttestations")
|
|
defer span.End()
|
|
unaggregatedAtts := c.UnaggregatedAttestations()
|
|
return c.aggregateUnaggregatedAtts(ctx, unaggregatedAtts)
|
|
}
|
|
|
|
func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedAtts []ethpb.Att) error {
|
|
_, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAtts")
|
|
defer span.End()
|
|
|
|
attsByVerAndDataRoot := make(map[attestation.Id][]ethpb.Att, len(unaggregatedAtts))
|
|
for _, att := range unaggregatedAtts {
|
|
id, err := attestation.NewId(att, attestation.Data)
|
|
if err != nil {
|
|
return errors.Wrap(err, "could not create attestation ID")
|
|
}
|
|
attsByVerAndDataRoot[id] = append(attsByVerAndDataRoot[id], att)
|
|
}
|
|
|
|
// Aggregate unaggregated attestations from the pool and save them in the pool.
|
|
// Track the unaggregated attestations that aren't able to aggregate.
|
|
leftOverUnaggregatedAtt := make(map[attestation.Id]bool)
|
|
|
|
leftOverUnaggregatedAtt = c.aggregateParallel(attsByVerAndDataRoot, leftOverUnaggregatedAtt)
|
|
|
|
// Remove the unaggregated attestations from the pool that were successfully aggregated.
|
|
for _, att := range unaggregatedAtts {
|
|
id, err := attestation.NewId(att, attestation.Full)
|
|
if err != nil {
|
|
return errors.Wrap(err, "could not create attestation ID")
|
|
}
|
|
if leftOverUnaggregatedAtt[id] {
|
|
continue
|
|
}
|
|
if err := c.DeleteUnaggregatedAttestation(att); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// aggregateParallel aggregates attestations in parallel for `atts` and saves them in the pool,
|
|
// returns the unaggregated attestations that weren't able to aggregate.
|
|
// Given `n` CPU cores, it creates a channel of size `n` and spawns `n` goroutines to aggregate attestations
|
|
func (c *AttCaches) aggregateParallel(atts map[attestation.Id][]ethpb.Att, leftOver map[attestation.Id]bool) map[attestation.Id]bool {
|
|
var leftoverLock sync.Mutex
|
|
wg := sync.WaitGroup{}
|
|
|
|
n := runtime.GOMAXPROCS(0) // defaults to the value of runtime.NumCPU
|
|
ch := make(chan []ethpb.Att, n)
|
|
wg.Add(n)
|
|
for range n {
|
|
go func() {
|
|
defer wg.Done()
|
|
for as := range ch {
|
|
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(as)
|
|
if err != nil {
|
|
log.WithError(err).Error("Could not aggregate unaggregated attestations")
|
|
continue
|
|
}
|
|
if aggregated == nil {
|
|
log.Error("Nil aggregated attestation")
|
|
continue
|
|
}
|
|
if aggregated.IsAggregated() {
|
|
if err := c.SaveAggregatedAttestations([]ethpb.Att{aggregated}); err != nil {
|
|
log.WithError(err).Error("Could not save aggregated attestation")
|
|
continue
|
|
}
|
|
} else {
|
|
id, err := attestation.NewId(aggregated, attestation.Full)
|
|
if err != nil {
|
|
log.WithError(err).Error("Could not create attestation ID")
|
|
continue
|
|
}
|
|
leftoverLock.Lock()
|
|
leftOver[id] = true
|
|
leftoverLock.Unlock()
|
|
}
|
|
}
|
|
}()
|
|
}
|
|
|
|
for _, as := range atts {
|
|
ch <- as
|
|
}
|
|
|
|
close(ch)
|
|
wg.Wait()
|
|
|
|
return leftOver
|
|
}
|
|
|
|
// SaveAggregatedAttestation saves an aggregated attestation in cache.
|
|
func (c *AttCaches) SaveAggregatedAttestation(att ethpb.Att) error {
|
|
if err := helpers.ValidateNilAttestation(att); err != nil {
|
|
return err
|
|
}
|
|
if !att.IsAggregated() {
|
|
return errors.New("attestation is not aggregated")
|
|
}
|
|
has, err := c.HasAggregatedAttestation(att)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if has {
|
|
return nil
|
|
}
|
|
|
|
seen, err := c.hasSeenBit(att)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if seen {
|
|
return nil
|
|
}
|
|
|
|
id, err := attestation.NewId(att, attestation.Data)
|
|
if err != nil {
|
|
return errors.Wrap(err, "could not create attestation ID")
|
|
}
|
|
copiedAtt := att.Clone()
|
|
|
|
c.aggregatedAttLock.Lock()
|
|
defer c.aggregatedAttLock.Unlock()
|
|
atts, ok := c.aggregatedAtt[id]
|
|
if !ok {
|
|
atts := []ethpb.Att{copiedAtt}
|
|
c.aggregatedAtt[id] = atts
|
|
return nil
|
|
}
|
|
|
|
atts, err = attaggregation.Aggregate(append(atts, copiedAtt))
|
|
if err != nil {
|
|
return err
|
|
}
|
|
c.aggregatedAtt[id] = atts
|
|
|
|
return nil
|
|
}
|
|
|
|
// SaveAggregatedAttestations saves a list of aggregated attestations in cache.
|
|
func (c *AttCaches) SaveAggregatedAttestations(atts []ethpb.Att) error {
|
|
for _, att := range atts {
|
|
if err := c.SaveAggregatedAttestation(att); err != nil {
|
|
log.WithError(err).Debug("Could not save aggregated attestation")
|
|
if err := c.DeleteAggregatedAttestation(att); err != nil {
|
|
log.WithError(err).Debug("Could not delete aggregated attestation")
|
|
}
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// AggregatedAttestations returns the aggregated attestations in cache.
|
|
func (c *AttCaches) AggregatedAttestations() []ethpb.Att {
|
|
c.aggregatedAttLock.RLock()
|
|
defer c.aggregatedAttLock.RUnlock()
|
|
|
|
atts := make([]ethpb.Att, 0)
|
|
|
|
for _, a := range c.aggregatedAtt {
|
|
atts = append(atts, a...)
|
|
}
|
|
|
|
return atts
|
|
}
|
|
|
|
// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
|
|
// filtered by committee index and slot.
|
|
func (c *AttCaches) AggregatedAttestationsBySlotIndex(
|
|
ctx context.Context,
|
|
slot primitives.Slot,
|
|
committeeIndex primitives.CommitteeIndex,
|
|
) []*ethpb.Attestation {
|
|
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
|
|
defer span.End()
|
|
|
|
atts := make([]*ethpb.Attestation, 0)
|
|
|
|
c.aggregatedAttLock.RLock()
|
|
defer c.aggregatedAttLock.RUnlock()
|
|
for _, as := range c.aggregatedAtt {
|
|
if as[0].Version() == version.Phase0 && slot == as[0].GetData().Slot && committeeIndex == as[0].GetData().CommitteeIndex {
|
|
for _, a := range as {
|
|
att, ok := a.(*ethpb.Attestation)
|
|
// This will never fail in practice because we asserted the version
|
|
if ok {
|
|
atts = append(atts, att)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
return atts
|
|
}
|
|
|
|
// AggregatedAttestationsBySlotIndexElectra returns the aggregated attestations in cache,
|
|
// filtered by committee index and slot.
|
|
func (c *AttCaches) AggregatedAttestationsBySlotIndexElectra(
|
|
ctx context.Context,
|
|
slot primitives.Slot,
|
|
committeeIndex primitives.CommitteeIndex,
|
|
) []*ethpb.AttestationElectra {
|
|
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndexElectra")
|
|
defer span.End()
|
|
|
|
atts := make([]*ethpb.AttestationElectra, 0)
|
|
|
|
c.aggregatedAttLock.RLock()
|
|
defer c.aggregatedAttLock.RUnlock()
|
|
for _, as := range c.aggregatedAtt {
|
|
if as[0].Version() >= version.Electra && slot == as[0].GetData().Slot && as[0].CommitteeBitsVal().BitAt(uint64(committeeIndex)) {
|
|
for _, a := range as {
|
|
att, ok := a.(*ethpb.AttestationElectra)
|
|
// This will never fail in practice because we asserted the version
|
|
if ok {
|
|
atts = append(atts, att)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
return atts
|
|
}
|
|
|
|
// DeleteAggregatedAttestation deletes the aggregated attestations in cache.
|
|
func (c *AttCaches) DeleteAggregatedAttestation(att ethpb.Att) error {
|
|
if err := helpers.ValidateNilAttestation(att); err != nil {
|
|
return err
|
|
}
|
|
if !att.IsAggregated() {
|
|
return errors.New("attestation is not aggregated")
|
|
}
|
|
|
|
if err := c.insertSeenBit(att); err != nil {
|
|
return err
|
|
}
|
|
|
|
id, err := attestation.NewId(att, attestation.Data)
|
|
if err != nil {
|
|
return errors.Wrap(err, "could not create attestation ID")
|
|
}
|
|
|
|
c.aggregatedAttLock.Lock()
|
|
defer c.aggregatedAttLock.Unlock()
|
|
attList, ok := c.aggregatedAtt[id]
|
|
if !ok {
|
|
return nil
|
|
}
|
|
|
|
filtered := make([]ethpb.Att, 0)
|
|
for _, a := range attList {
|
|
if c, err := att.GetAggregationBits().Contains(a.GetAggregationBits()); err != nil {
|
|
return err
|
|
} else if !c {
|
|
filtered = append(filtered, a)
|
|
}
|
|
}
|
|
if len(filtered) == 0 {
|
|
delete(c.aggregatedAtt, id)
|
|
} else {
|
|
c.aggregatedAtt[id] = filtered
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// HasAggregatedAttestation checks if the input attestations has already existed in cache.
|
|
func (c *AttCaches) HasAggregatedAttestation(att ethpb.Att) (bool, error) {
|
|
if err := helpers.ValidateNilAttestation(att); err != nil {
|
|
return false, err
|
|
}
|
|
|
|
id, err := attestation.NewId(att, attestation.Data)
|
|
if err != nil {
|
|
return false, errors.Wrap(err, "could not create attestation ID")
|
|
}
|
|
|
|
c.aggregatedAttLock.RLock()
|
|
defer c.aggregatedAttLock.RUnlock()
|
|
if atts, ok := c.aggregatedAtt[id]; ok {
|
|
for _, a := range atts {
|
|
if c, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
|
|
return false, err
|
|
} else if c {
|
|
return true, nil
|
|
}
|
|
}
|
|
}
|
|
|
|
c.blockAttLock.RLock()
|
|
defer c.blockAttLock.RUnlock()
|
|
if atts, ok := c.blockAtt[id]; ok {
|
|
for _, a := range atts {
|
|
if c, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
|
|
return false, err
|
|
} else if c {
|
|
return true, nil
|
|
}
|
|
}
|
|
}
|
|
|
|
return false, nil
|
|
}
|
|
|
|
// AggregatedAttestationCount returns the number of aggregated attestations key in the pool.
|
|
func (c *AttCaches) AggregatedAttestationCount() int {
|
|
c.aggregatedAttLock.RLock()
|
|
defer c.aggregatedAttLock.RUnlock()
|
|
return len(c.aggregatedAtt)
|
|
}
|