Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-09 13:28:01 -05:00
**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

| Feature | Semi-Supernode | Supernode |
| ----------------------- | ------------------------- | ------------------------ |
| **Custody Groups** | 64 | 128 |
| **Data Columns** | 64 | 128 |
| **Storage** | ~50% | ~100% |
| **Blob Reconstruction** | Yes (via Reed-Solomon) | No reconstruction needed |
| **Flag** | `--semi-supernode` | `--supernode` |
| **Can serve all blobs** | Yes (with reconstruction) | Yes (directly) |

**Note:** if your validators' total effective balance results in a higher custody requirement than the semi-supernode target, that requirement overrides the target (see the sketch at the end of this description). cgc=64 from @nalepae

Pro:

- We are useful to the network
- Lower likelihood of disconnection
- Straightforward to implement

Con:

- We cannot revert to a full node
- We have to serve incoming RPC requests for the 64 custodied columns

Tested with the following kurtosis setup:

```
participants:
  # Super-nodes
  - el_type: geth
    el_image: ethpandaops/geth:master
    cl_type: prysm
    vc_image: gcr.io/offchainlabs/prysm/validator:latest
    cl_image: gcr.io/offchainlabs/prysm/beacon-chain:latest
    count: 2
    cl_extra_params:
      - --supernode
    vc_extra_params:
      - --verbosity=debug
  # Semi-supernodes
  - el_type: geth
    el_image: ethpandaops/geth:master
    cl_type: prysm
    vc_image: gcr.io/offchainlabs/prysm/validator:latest
    cl_image: gcr.io/offchainlabs/prysm/beacon-chain:latest
    count: 2
    validator_count: 1
    cl_extra_params:
      - --semi-supernode
    vc_extra_params:
      - --verbosity=debug
additional_services:
  - dora
  - spamoor
spamoor_params:
  image: ethpandaops/spamoor:master
  max_mem: 4000
  spammers:
    - scenario: eoatx
      config:
        throughput: 200
    - scenario: blobs
      config:
        throughput: 20
network_params:
  fulu_fork_epoch: 0
  withdrawal_type: "0x02"
  preset: mainnet
global_log_level: debug
```

```
curl -H "Accept: application/json" http://127.0.0.1:32961/eth/v1/node/identity
{"data":{"peer_id":"16Uiu2HAm7xzhnGwea8gkcxRSC6fzUkvryP6d9HdWNkoeTkj6RSqw","enr":"enr:-Ni4QIH5u2NQz17_pTe9DcCfUyG8TidDJJjIeBpJRRm4ACQzGBpCJdyUP9eGZzwwZ2HS1TnB9ACxFMQ5LP5njnMDLm-GAZqZEXjih2F0dG5ldHOIAAAAAAAwAACDY2djQIRldGgykLZy_whwAAA4__________-CaWSCdjSCaXCErBAAE4NuZmSEAAAAAIRxdWljgjLIiXNlY3AyNTZrMaECulJrXpSOBmCsQWcGYzQsst7r3-Owlc9iZbEcJTDkB6qIc3luY25ldHMFg3RjcIIyyIN1ZHCCLuA","p2p_addresses":["/ip4/172.16.0.19/tcp/13000/p2p/16Uiu2HAm7xzhnGwea8gkcxRSC6fzUkvryP6d9HdWNkoeTkj6RSqw","/ip4/172.16.0.19/udp/13000/quic-v1/p2p/16Uiu2HAm7xzhnGwea8gkcxRSC6fzUkvryP6d9HdWNkoeTkj6RSqw"],"discovery_addresses":["/ip4/172.16.0.19/udp/12000/p2p/16Uiu2HAm7xzhnGwea8gkcxRSC6fzUkvryP6d9HdWNkoeTkj6RSqw"],"metadata":{"seq_number":"3","attnets":"0x0000000000300000","syncnets":"0x05","custody_group_count":"64"}}}
```

The `custody_group_count` of `64` in the node metadata, together with the 64 data column sidecars returned below, confirms the semi-supernode custody target.

```
curl -s http://127.0.0.1:32961/eth/v1/debug/beacon/data_column_sidecars/head | jq '.data | length'
64
```

```
curl -X 'GET' \
  'http://127.0.0.1:32961/eth/v1/beacon/blobs/head' \
  -H 'accept: application/json'
```

**Which issue(s) does this PR fix?**

Fixes #

**Other notes for review**

**Acknowledgements**

- [x] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description to this PR with sufficient context for reviewers to understand this PR.

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: james-prysm <jhe@offchainlabs.com>
Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
149 lines
4.5 KiB
Go
package sync

import (
	"context"
	"strings"
	"time"

	"github.com/OffchainLabs/prysm/v7/async"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var errNilFinalizedState = errors.New("finalized state is nil")
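
// maintainCustodyInfo periodically (once per minute) attempts to update the
// node's custody info until the target custody group count is reached.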
func (s *Service) maintainCustodyInfo() {
	const interval = 1 * time.Minute

	async.RunEvery(s.ctx, interval, func() {
		if err := s.updateCustodyInfoIfNeeded(); err != nil {
			log.WithError(err).Error("Failed to update custody info")
		}
	})
}
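
// updateCustodyInfoIfNeeded raises the stored custody info to the target custody
// group count. It is a no-op if the target is already met, and it defers the update
// to a later tick while any subscribed data column sidecar topic lacks peers.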
func (s *Service) updateCustodyInfoIfNeeded() error {
	const minimumPeerCount = 1

	// Get our actual custody group count.
	actualCustodyGroupCount, err := s.cfg.p2p.CustodyGroupCount(s.ctx)
	if err != nil {
		return errors.Wrap(err, "p2p custody group count")
	}

	// Get our target custody group count.
	targetCustodyGroupCount, err := s.custodyGroupCount(s.ctx)
	if err != nil {
		return errors.Wrap(err, "custody group count")
	}

	// If the actual custody group count already meets or exceeds the target, skip the update.
	if actualCustodyGroupCount >= targetCustodyGroupCount {
		return nil
	}

	// Check that all subscribed data column sidecar topics have at least `minimumPeerCount` peers.
	topics := s.cfg.p2p.PubSub().GetTopics()
	enoughPeers := true
	for _, topic := range topics {
		if !strings.Contains(topic, p2p.GossipDataColumnSidecarMessage) {
			continue
		}

		if peers := s.cfg.p2p.PubSub().ListPeers(topic); len(peers) < minimumPeerCount {
			// If a topic has fewer than the minimum required peers, log it and defer the update.
			log.WithFields(logrus.Fields{
				"topic":            topic,
				"peerCount":        len(peers),
				"minimumPeerCount": minimumPeerCount,
			}).Debug("Insufficient peers for data column sidecar topic to maintain custody count")
			enoughPeers = false
		}
	}

	if !enoughPeers {
		return nil
	}

	headROBlock, err := s.cfg.chain.HeadBlock(s.ctx)
	if err != nil {
		return errors.Wrap(err, "head block")
	}
	headSlot := headROBlock.Block().Slot()

	storedEarliestSlot, storedGroupCount, err := s.cfg.p2p.UpdateCustodyInfo(headSlot, targetCustodyGroupCount)
	if err != nil {
		return errors.Wrap(err, "p2p update custody info")
	}

	if _, _, err := s.cfg.beaconDB.UpdateCustodyInfo(s.ctx, storedEarliestSlot, storedGroupCount); err != nil {
		return errors.Wrap(err, "beacon db update custody info")
	}

	return nil
}
// custodyGroupCount computes the custody group count based on the custody requirement,
// the validators' custody requirement, and whether the node is subscribed to all data subnets.
func (s *Service) custodyGroupCount(context.Context) (uint64, error) {
	cfg := params.BeaconConfig()

	if flags.Get().Supernode {
		return cfg.NumberOfCustodyGroups, nil
	}

	// Calculate the validators' custody requirement.
	validatorsCustodyRequirement, err := s.validatorsCustodyRequirement()
	if err != nil {
		return 0, errors.Wrap(err, "validators custody requirement")
	}

	effectiveCustodyRequirement := max(cfg.CustodyRequirement, validatorsCustodyRequirement)

	// If we're not in semi-supernode mode, just use the effective requirement.
	if !flags.Get().SemiSupernode {
		return effectiveCustodyRequirement, nil
	}

	// Semi-supernode mode custodies the minimum number of custody groups required for
	// reconstruction. This is future-proof and works correctly even if the number of
	// custody groups differs from the number of columns.
	semiSupernodeTarget, err := peerdas.MinimumCustodyGroupCountToReconstruct()
	if err != nil {
		return 0, errors.Wrap(err, "minimum custody group count")
	}
	return max(effectiveCustodyRequirement, semiSupernodeTarget), nil
}
// validatorsCustodyRequirement computes the custody requirement based on the
// finalized state and the tracked validators.
func (s *Service) validatorsCustodyRequirement() (uint64, error) {
	if s.trackedValidatorsCache == nil {
		return 0, nil
	}

	// Get the indices of the tracked validators.
	indices := s.trackedValidatorsCache.Indices()

	// Return early if no validators are tracked.
	if len(indices) == 0 {
		return 0, nil
	}

	// Retrieve the finalized state.
	finalizedState := s.cfg.stateGen.FinalizedState()
	if finalizedState == nil || finalizedState.IsNil() {
		return 0, errNilFinalizedState
	}

	// Compute the validators' custody requirement.
	result, err := peerdas.ValidatorsCustodyRequirement(finalizedState, indices)
	if err != nil {
		return 0, errors.Wrap(err, "validators custody requirements")
	}

	return result, nil
}