mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-07 20:43:57 -05:00
<!-- Thanks for sending a PR! Before submitting: 1. If this is your first PR, check out our contribution guide here https://docs.prylabs.network/docs/contribute/contribution-guidelines You will then need to sign our Contributor License Agreement (CLA), which will show up as a comment from a bot in this pull request after you open it. We cannot review code without a signed CLA. 2. Please file an associated tracking issue if this pull request is non-trivial and requires context for our team to understand. All features and most bug fixes should have an associated issue with a design discussed and decided upon. Small bug fixes and documentation improvements don't need issues. 3. New features and bug fixes must have tests. Documentation may need to be updated. If you're unsure what to update, send the PR, and we'll discuss in review. 4. Note that PRs updating dependencies and new Go versions are not accepted. Please file an issue instead. 5. A changelog entry is required for user facing issues. --> **What type of PR is this?** Feature **What does this PR do? Why is it needed?** | Feature | Semi-Supernode | Supernode | | ----------------------- | ------------------------- | ------------------------ | | **Custody Groups** | 64 | 128 | | **Data Columns** | 64 | 128 | | **Storage** | ~50% | ~100% | | **Blob Reconstruction** | Yes (via Reed-Solomon) | No reconstruction needed | | **Flag** | `--semi-supernode` | `--supernode` | | **Can serve all blobs** | Yes (with reconstruction) | Yes (directly) | **note** if your validator's total effective balance results in more custody than the semi-supernode, it will override those requirements. 
cgc=64 from @nalepae Pro: - We are useful to the network - Less disconnection likelihood - Straight forward to implement Con: - We cannot revert to a full node - We have to serve incoming RPC requests corresponding to 64 columns Tested the following using this kurtosis setup ``` participants: # Super-nodes - el_type: geth el_image: ethpandaops/geth:master cl_type: prysm vc_image: gcr.io/offchainlabs/prysm/validator:latest cl_image: gcr.io/offchainlabs/prysm/beacon-chain:latest count: 2 cl_extra_params: - --supernode vc_extra_params: - --verbosity=debug # Full-nodes - el_type: geth el_image: ethpandaops/geth:master cl_type: prysm vc_image: gcr.io/offchainlabs/prysm/validator:latest cl_image: gcr.io/offchainlabs/prysm/beacon-chain:latest count: 2 validator_count: 1 cl_extra_params: - --semi-supernode vc_extra_params: - --verbosity=debug additional_services: - dora - spamoor spamoor_params: image: ethpandaops/spamoor:master max_mem: 4000 spammers: - scenario: eoatx config: throughput: 200 - scenario: blobs config: throughput: 20 network_params: fulu_fork_epoch: 0 withdrawal_type: "0x02" preset: mainnet global_log_level: debug ``` ``` curl -H "Accept: application/json" http://127.0.0.1:32961/eth/v1/node/identity 
{"data":{"peer_id":"16Uiu2HAm7xzhnGwea8gkcxRSC6fzUkvryP6d9HdWNkoeTkj6RSqw","enr":"enr:-Ni4QIH5u2NQz17_pTe9DcCfUyG8TidDJJjIeBpJRRm4ACQzGBpCJdyUP9eGZzwwZ2HS1TnB9ACxFMQ5LP5njnMDLm-GAZqZEXjih2F0dG5ldHOIAAAAAAAwAACDY2djQIRldGgykLZy_whwAAA4__________-CaWSCdjSCaXCErBAAE4NuZmSEAAAAAIRxdWljgjLIiXNlY3AyNTZrMaECulJrXpSOBmCsQWcGYzQsst7r3-Owlc9iZbEcJTDkB6qIc3luY25ldHMFg3RjcIIyyIN1ZHCCLuA","p2p_addresses":["/ip4/172.16.0.19/tcp/13000/p2p/16Uiu2HAm7xzhnGwea8gkcxRSC6fzUkvryP6d9HdWNkoeTkj6RSqw","/ip4/172.16.0.19/udp/13000/quic-v1/p2p/16Uiu2HAm7xzhnGwea8gkcxRSC6fzUkvryP6d9HdWNkoeTkj6RSqw"],"discovery_addresses":["/ip4/172.16.0.19/udp/12000/p2p/16Uiu2HAm7xzhnGwea8gkcxRSC6fzUkvryP6d9HdWNkoeTkj6RSqw"],"metadata":{"seq_number":"3","attnets":"0x0000000000300000","syncnets":"0x05","custody_group_count":"64"}}} ``` ``` curl -s http://127.0.0.1:32961/eth/v1/debug/beacon/data_column_sidecars/head | jq '.data | length' 64 ``` ``` curl -X 'GET' \ 'http://127.0.0.1:32961/eth/v1/beacon/blobs/head' \ -H 'accept: application/json' ``` **Which issues(s) does this PR fix?** Fixes # **Other notes for review** **Acknowledgements** - [x] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md). - [x] I have included a uniquely named [changelog fragment file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd). - [x] I have added a description to this PR with sufficient context for reviewers to understand this PR. --------- Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com> Co-authored-by: james-prysm <jhe@offchainlabs.com> Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
253 lines
8.3 KiB
Go
253 lines
8.3 KiB
Go
package sync
|
|
|
|
import (
|
|
"fmt"
|
|
"math/rand"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
|
mockChain "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
|
|
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
|
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
|
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
|
"github.com/OffchainLabs/prysm/v7/config/params"
|
|
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
|
"github.com/OffchainLabs/prysm/v7/testing/require"
|
|
"github.com/OffchainLabs/prysm/v7/testing/util"
|
|
)
|
|
|
|
func TestProcessDataColumnSidecarsFromReconstruction(t *testing.T) {
|
|
const blobCount = 4
|
|
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
|
|
|
ctx := t.Context()
|
|
|
|
// Start the trusted setup.
|
|
err := kzg.Start()
|
|
require.NoError(t, err)
|
|
|
|
roBlock, _, verifiedRoDataColumns := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
|
require.Equal(t, numberOfColumns, uint64(len(verifiedRoDataColumns)))
|
|
|
|
minimumCount := peerdas.MinimumColumnCountToReconstruct()
|
|
|
|
t.Run("not enough stored sidecars", func(t *testing.T) {
|
|
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
|
err := storage.Save(verifiedRoDataColumns[:minimumCount-1])
|
|
require.NoError(t, err)
|
|
|
|
service := NewService(ctx, WithP2P(p2ptest.NewTestP2P(t)), WithDataColumnStorage(storage))
|
|
err = service.processDataColumnSidecarsFromReconstruction(ctx, verifiedRoDataColumns[0])
|
|
require.NoError(t, err)
|
|
})
|
|
|
|
t.Run("all stored sidecars", func(t *testing.T) {
|
|
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
|
err := storage.Save(verifiedRoDataColumns)
|
|
require.NoError(t, err)
|
|
|
|
service := NewService(ctx, WithP2P(p2ptest.NewTestP2P(t)), WithDataColumnStorage(storage))
|
|
err = service.processDataColumnSidecarsFromReconstruction(ctx, verifiedRoDataColumns[0])
|
|
require.NoError(t, err)
|
|
})
|
|
|
|
t.Run("should reconstruct", func(t *testing.T) {
|
|
// Here we setup a cgc of 8, which is not realistic, since there is no
|
|
// real reason for a node to both:
|
|
// - store enough data column sidecars to enable reconstruction, and
|
|
// - custody not enough columns to enable reconstruction.
|
|
// However, for the needs of this test, this is perfectly fine.
|
|
const cgc = 8
|
|
|
|
require.NoError(t, err)
|
|
|
|
chainService := &mockChain.ChainService{}
|
|
p2p := p2ptest.NewTestP2P(t)
|
|
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
|
|
|
service := NewService(
|
|
ctx,
|
|
WithP2P(p2p),
|
|
WithDataColumnStorage(storage),
|
|
WithChainService(chainService),
|
|
WithOperationNotifier(chainService.OperationNotifier()),
|
|
)
|
|
|
|
minimumCount := peerdas.MinimumColumnCountToReconstruct()
|
|
receivedBeforeReconstruction := verifiedRoDataColumns[:minimumCount]
|
|
|
|
err = service.receiveDataColumnSidecars(ctx, receivedBeforeReconstruction)
|
|
require.NoError(t, err)
|
|
|
|
err = storage.Save(receivedBeforeReconstruction)
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, false, p2p.BroadcastCalled.Load())
|
|
|
|
// Check received indices before reconstruction.
|
|
require.Equal(t, minimumCount, uint64(len(chainService.DataColumns)))
|
|
for i, actual := range chainService.DataColumns {
|
|
require.Equal(t, uint64(i), actual.Index)
|
|
}
|
|
|
|
// Run the reconstruction.
|
|
err = service.processDataColumnSidecarsFromReconstruction(ctx, verifiedRoDataColumns[0])
|
|
require.NoError(t, err)
|
|
|
|
expected := make(map[uint64]bool, minimumCount+cgc)
|
|
for i := range minimumCount {
|
|
expected[i] = true
|
|
}
|
|
|
|
// The node should custody these indices.
|
|
for _, i := range [...]uint64{75, 87, 102, 117} {
|
|
expected[i] = true
|
|
}
|
|
|
|
block := roBlock.Block()
|
|
slot := block.Slot()
|
|
proposerIndex := block.ProposerIndex()
|
|
|
|
require.Equal(t, len(expected), len(chainService.DataColumns))
|
|
for _, actual := range chainService.DataColumns {
|
|
require.Equal(t, true, expected[actual.Index])
|
|
require.Equal(t, true, service.hasSeenDataColumnIndex(slot, proposerIndex, actual.Index))
|
|
}
|
|
|
|
require.Equal(t, true, p2p.BroadcastCalled.Load())
|
|
})
|
|
}
|
|
|
|
func TestComputeRandomDelay(t *testing.T) {
|
|
const (
|
|
seed = 42
|
|
expected = 746056722 * time.Nanosecond // = 0.746056722 seconds
|
|
)
|
|
slotStartTime := time.Date(2020, 12, 30, 0, 0, 0, 0, time.UTC)
|
|
|
|
service := NewService(
|
|
t.Context(),
|
|
WithP2P(p2ptest.NewTestP2P(t)),
|
|
WithReconstructionRandGen(rand.New(rand.NewSource(seed))),
|
|
)
|
|
|
|
waitingTime := service.computeRandomDelay(slotStartTime)
|
|
fmt.Print(waitingTime)
|
|
require.Equal(t, expected, waitingTime)
|
|
}
|
|
|
|
func TestSemiSupernodeReconstruction(t *testing.T) {
|
|
const blobCount = 4
|
|
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
|
|
|
ctx := t.Context()
|
|
|
|
// Start the trusted setup.
|
|
err := kzg.Start()
|
|
require.NoError(t, err)
|
|
|
|
roBlock, _, verifiedRoDataColumns := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
|
require.Equal(t, numberOfColumns, uint64(len(verifiedRoDataColumns)))
|
|
|
|
minimumCount := peerdas.MinimumColumnCountToReconstruct()
|
|
|
|
t.Run("semi-supernode reconstruction with exactly 64 columns", func(t *testing.T) {
|
|
// Test that reconstruction works with exactly the minimum number of columns (64).
|
|
// This simulates semi-supernode mode which custodies exactly 64 columns.
|
|
require.Equal(t, uint64(64), minimumCount, "Expected minimum column count to be 64")
|
|
|
|
chainService := &mockChain.ChainService{}
|
|
p2p := p2ptest.NewTestP2P(t)
|
|
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
|
|
|
service := NewService(
|
|
ctx,
|
|
WithP2P(p2p),
|
|
WithDataColumnStorage(storage),
|
|
WithChainService(chainService),
|
|
WithOperationNotifier(chainService.OperationNotifier()),
|
|
)
|
|
|
|
// Use exactly 64 columns (minimum for reconstruction) to simulate semi-supernode mode.
|
|
// Select the first 64 columns.
|
|
semiSupernodeColumns := verifiedRoDataColumns[:minimumCount]
|
|
|
|
err = service.receiveDataColumnSidecars(ctx, semiSupernodeColumns)
|
|
require.NoError(t, err)
|
|
|
|
err = storage.Save(semiSupernodeColumns)
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, false, p2p.BroadcastCalled.Load())
|
|
|
|
// Check received indices before reconstruction.
|
|
require.Equal(t, minimumCount, uint64(len(chainService.DataColumns)))
|
|
for i, actual := range chainService.DataColumns {
|
|
require.Equal(t, uint64(i), actual.Index)
|
|
}
|
|
|
|
// Run the reconstruction.
|
|
err = service.processDataColumnSidecarsFromReconstruction(ctx, verifiedRoDataColumns[0])
|
|
require.NoError(t, err)
|
|
|
|
// Verify we can reconstruct all columns from just 64.
|
|
// The node should have received the initial 64 columns.
|
|
if len(chainService.DataColumns) < int(minimumCount) {
|
|
t.Fatalf("Expected at least %d columns but got %d", minimumCount, len(chainService.DataColumns))
|
|
}
|
|
|
|
block := roBlock.Block()
|
|
slot := block.Slot()
|
|
proposerIndex := block.ProposerIndex()
|
|
|
|
// Verify that we have seen at least the minimum number of columns.
|
|
seenCount := 0
|
|
for i := range numberOfColumns {
|
|
if service.hasSeenDataColumnIndex(slot, proposerIndex, i) {
|
|
seenCount++
|
|
}
|
|
}
|
|
if seenCount < int(minimumCount) {
|
|
t.Fatalf("Expected to see at least %d columns but saw %d", minimumCount, seenCount)
|
|
}
|
|
})
|
|
|
|
t.Run("semi-supernode reconstruction with random 64 columns", func(t *testing.T) {
|
|
// Test reconstruction with 64 non-contiguous columns to simulate a real scenario.
|
|
chainService := &mockChain.ChainService{}
|
|
p2p := p2ptest.NewTestP2P(t)
|
|
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
|
|
|
service := NewService(
|
|
ctx,
|
|
WithP2P(p2p),
|
|
WithDataColumnStorage(storage),
|
|
WithChainService(chainService),
|
|
WithOperationNotifier(chainService.OperationNotifier()),
|
|
)
|
|
|
|
// Select every other column to get 64 non-contiguous columns.
|
|
semiSupernodeColumns := make([]blocks.VerifiedRODataColumn, 0, minimumCount)
|
|
for i := uint64(0); i < numberOfColumns && uint64(len(semiSupernodeColumns)) < minimumCount; i += 2 {
|
|
semiSupernodeColumns = append(semiSupernodeColumns, verifiedRoDataColumns[i])
|
|
}
|
|
require.Equal(t, minimumCount, uint64(len(semiSupernodeColumns)))
|
|
|
|
err = service.receiveDataColumnSidecars(ctx, semiSupernodeColumns)
|
|
require.NoError(t, err)
|
|
|
|
err = storage.Save(semiSupernodeColumns)
|
|
require.NoError(t, err)
|
|
|
|
// Run the reconstruction.
|
|
err = service.processDataColumnSidecarsFromReconstruction(ctx, semiSupernodeColumns[0])
|
|
require.NoError(t, err)
|
|
|
|
// Verify we received the columns.
|
|
if len(chainService.DataColumns) < int(minimumCount) {
|
|
t.Fatalf("Expected at least %d columns but got %d", minimumCount, len(chainService.DataColumns))
|
|
}
|
|
})
|
|
}
|