Add support for fulu fork epoch and bpo schedule (#15975)

* wip

* fixing tests

* adding script to update workspace for eth clients

* updating test spec to 1.6.0 and fixing broadcaster test

* fix specrefs

* more ethspecify fixes

* still trying to fix ethspecify

* fixing attestation tests

* fixing sha for consensus specs

* removing script for now until i have something more standard

* fixing more p2p tests

* fixing discovery tests

* attempting to fix discovery test flakeyness

* attempting to fix port binding issue

* more attempts to fix flakey tests

* Revert "more attempts to fix flakey tests"

This reverts commit 25e8183703.

* Revert "attempting to fix port binding issue"

This reverts commit 583df8000d.

* Revert "attempting to fix discovery test flakeyness"

This reverts commit 3c76525870.

* Revert "fixing discovery tests"

This reverts commit 8c701bf3b9.

* Revert "fixing more p2p tests"

This reverts commit 140d5db203.

* Revert "fixing attestation tests"

This reverts commit 26ded244cb.

* fixing attestation tests

* fixing more p2p tests

* fixing discovery tests

* attempting to fix discovery test flakeyness

* attempting to fix port binding issue

* more attempts to fix flakey tests

* changelog

* fixing import

* adding some missing dependencies, but TestService_BroadcastAttestationWithDiscoveryAttempts is still failing

* attempting to fix test

* reverting test as it migrated to other pr

* reverting test

* fixing test from merge

* Fix `TestService_BroadcastAttestationWithDiscoveryAttempts`.

* Fix again `TestService_Start_OnlyStartsOnce`.

* fixing TestListenForNewNodes

* removing manual set of fulu epoch

* missed a few

* fixing subnet test

* Update beacon-chain/rpc/eth/config/handlers_test.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* removing a few more missed spots of reverting fulu epoch setting

* updating test name based on feedback

* fixing rest apis, they actually need the setting of the epoch due to the guard

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
This commit is contained in:
james-prysm
2025-11-05 14:41:36 -08:00
committed by GitHub
parent 8ad547c969
commit 8b6f187b15
13 changed files with 446 additions and 67 deletions

View File

@@ -253,16 +253,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz", url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
) )
consensus_spec_version = "v1.6.0-beta.2" consensus_spec_version = "v1.6.0"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests") load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests( consensus_spec_tests(
name = "consensus_spec_tests", name = "consensus_spec_tests",
flavors = { flavors = {
"general": "sha256-oEj0MTViJHjZo32nABK36gfvSXpbwkBk/jt6Mj7pWFI=", "general": "sha256-54hTaUNF9nLg+hRr3oHoq0yjZpW3MNiiUUuCQu6Rajk=",
"minimal": "sha256-cS4NPv6IRBoCSmWomQ8OEo8IsVNW9YawUFqoRZQBUj4=", "minimal": "sha256-1JHIGg3gVMjvcGYRHR5cwdDgOvX47oR/MWp6gyAeZfA=",
"mainnet": "sha256-BYuLndMPAh4p13IRJgNfVakrCVL69KRrNw2tdc3ETbE=", "mainnet": "sha256-292h3W2Ffts0YExgDTyxYe9Os7R0bZIXuAaMO8P6kl4=",
}, },
version = consensus_spec_version, version = consensus_spec_version,
) )
@@ -278,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
) )
""", """,
integrity = "sha256-MForEP9dTe0z3ZkTHjX4H6waSkSTghf3gQHPwrSCCro=", integrity = "sha256-VzBgrEokvYSMIIXVnSA5XS9I3m9oxpvToQGxC1N5lzw=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:], strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version, url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
) )
@@ -327,9 +327,9 @@ filegroup(
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
) )
""", """,
integrity = "sha256-NZr/gsQK9rBHRnznlPBiNzJpK8MPMrfUa3f+QYqn1+g=", integrity = "sha256-+mqMXyboedVw8Yp0v+U9GDz98QoC1SZET8mjaKPX+AI=",
strip_prefix = "mainnet-978f1794eada6f85bee76e4d2d5959a5fb8e0cc5", strip_prefix = "mainnet-980aee8893a2291d473c38f63797d5bc370fa381",
url = "https://github.com/eth-clients/mainnet/archive/978f1794eada6f85bee76e4d2d5959a5fb8e0cc5.tar.gz", url = "https://github.com/eth-clients/mainnet/archive/980aee8893a2291d473c38f63797d5bc370fa381.tar.gz",
) )
http_archive( http_archive(

View File

@@ -51,16 +51,20 @@ func Test_commitmentsToCheck(t *testing.T) {
name: "commitments within da", name: "commitments within da",
block: func(t *testing.T) blocks.ROBlock { block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockFulu() d := util.NewBeaconBlockFulu()
d.Block.Body.BlobKzgCommitments = commits[:maxBlobs]
d.Block.Slot = fulu + 100 d.Block.Slot = fulu + 100
mb := params.GetNetworkScheduleEntry(slots.ToEpoch(d.Block.Slot)).MaxBlobsPerBlock
d.Block.Body.BlobKzgCommitments = commits[:mb]
sb, err := blocks.NewSignedBeaconBlock(d) sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err) require.NoError(t, err)
rb, err := blocks.NewROBlock(sb) rb, err := blocks.NewROBlock(sb)
require.NoError(t, err) require.NoError(t, err)
return rb return rb
}, },
commits: commits[:maxBlobs], commits: func() [][]byte {
slot: fulu + 100, mb := params.GetNetworkScheduleEntry(slots.ToEpoch(fulu + 100)).MaxBlobsPerBlock
return commits[:mb]
}(),
slot: fulu + 100,
}, },
{ {
name: "commitments outside da", name: "commitments outside da",

View File

@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg" "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers" "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers" "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing" p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
@@ -218,19 +219,30 @@ func TestService_BroadcastAttestation(t *testing.T) {
func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) { func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
const port = uint(2000) const port = uint(2000)
// The DB has to be shared in all peers to avoid the
// duplicate metrics collector registration attempted.
// However, we don't care for this test.
db := testDB.SetupDB(t)
// Setup bootnode. // Setup bootnode.
cfg := &Config{PingInterval: testPingInterval} cfg := &Config{PingInterval: testPingInterval, DB: db}
cfg.UDPPort = uint(port) cfg.UDPPort = uint(port)
_, pkey := createAddrAndPrivKey(t) _, pkey := createAddrAndPrivKey(t)
ipAddr := net.ParseIP("127.0.0.1") ipAddr := net.ParseIP("127.0.0.1")
genesisTime := time.Now() genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32) genesisValidatorsRoot := make([]byte, 32)
s := &Service{ s := &Service{
cfg: cfg, cfg: cfg,
genesisTime: genesisTime, genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot, genesisValidatorsRoot: genesisValidatorsRoot,
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
ctx: t.Context(),
custodyInfoSet: make(chan struct{}),
} }
close(s.custodyInfoSet)
bootListener, err := s.createListener(ipAddr, pkey) bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err) require.NoError(t, err)
defer bootListener.Close() defer bootListener.Close()
@@ -245,6 +257,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
Discv5BootStrapAddrs: []string{bootNode.String()}, Discv5BootStrapAddrs: []string{bootNode.String()},
MaxPeers: 2, MaxPeers: 2,
PingInterval: testPingInterval, PingInterval: testPingInterval,
DB: db,
} }
// Setup 2 different hosts // Setup 2 different hosts
for i := uint(1); i <= 2; i++ { for i := uint(1); i <= 2; i++ {
@@ -259,7 +272,12 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
genesisTime: genesisTime, genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot, genesisValidatorsRoot: genesisValidatorsRoot,
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
ctx: t.Context(),
custodyInfoSet: make(chan struct{}),
} }
close(s.custodyInfoSet)
listener, err := s.startDiscoveryV5(ipAddr, pkey) listener, err := s.startDiscoveryV5(ipAddr, pkey)
// Set for 2nd peer // Set for 2nd peer
if i == 2 { if i == 2 {
@@ -711,18 +729,26 @@ func TestService_BroadcastDataColumn(t *testing.T) {
// Create a host. // Create a host.
_, pkey, ipAddr := createHost(t, port) _, pkey, ipAddr := createHost(t, port)
// Create a shared DB for the service
db := testDB.SetupDB(t)
// Create and close the custody info channel immediately since custodyInfo is already set
custodyInfoSet := make(chan struct{})
close(custodyInfoSet)
service := &Service{ service := &Service{
ctx: ctx, ctx: ctx,
host: p1.BHost, host: p1.BHost,
pubsub: p1.PubSub(), pubsub: p1.PubSub(),
joinedTopics: map[string]*pubsub.Topic{}, joinedTopics: map[string]*pubsub.Topic{},
cfg: &Config{}, cfg: &Config{DB: db},
genesisTime: time.Now(), genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex), subnetsLock: make(map[uint64]*sync.RWMutex),
subnetsLockLock: sync.Mutex{}, subnetsLockLock: sync.Mutex{},
peers: peers.NewStatus(ctx, &peers.StatusConfig{ScorerParams: &scorers.Config{}}), peers: peers.NewStatus(ctx, &peers.StatusConfig{ScorerParams: &scorers.Config{}}),
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
custodyInfoSet: custodyInfoSet,
} }
// Create a listener. // Create a listener.

View File

@@ -136,20 +136,26 @@ func setNodeSubnets(localNode *enode.LocalNode, attSubnets []uint64) {
} }
func TestCreateListener(t *testing.T) { func TestCreateListener(t *testing.T) {
port := 1024
ipAddr, pkey := createAddrAndPrivKey(t) ipAddr, pkey := createAddrAndPrivKey(t)
db := testDB.SetupDB(t)
custodyInfoSet := make(chan struct{})
close(custodyInfoSet)
s := &Service{ s := &Service{
ctx: t.Context(),
genesisTime: time.Now(), genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{UDPPort: uint(port)}, cfg: &Config{UDPPort: 2200, DB: db},
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
custodyInfoSet: custodyInfoSet,
} }
listener, err := s.createListener(ipAddr, pkey) listener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err) require.NoError(t, err)
defer listener.Close() defer listener.Close()
assert.Equal(t, true, listener.Self().IP().Equal(ipAddr), "IP address is not the expected type") assert.Equal(t, true, listener.Self().IP().Equal(ipAddr), "IP address is not the expected type")
assert.Equal(t, port, listener.Self().UDP(), "Incorrect port number") assert.Equal(t, 2200, listener.Self().UDP(), "Incorrect port number")
pubkey := listener.Self().Pubkey() pubkey := listener.Self().Pubkey()
XisSame := pkey.PublicKey.X.Cmp(pubkey.X) == 0 XisSame := pkey.PublicKey.X.Cmp(pubkey.X) == 0
@@ -161,15 +167,21 @@ func TestCreateListener(t *testing.T) {
} }
func TestStartDiscV5_DiscoverAllPeers(t *testing.T) { func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t) ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now() genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32) genesisValidatorsRoot := make([]byte, 32)
db := testDB.SetupDB(t)
custodyInfoSet := make(chan struct{})
close(custodyInfoSet)
s := &Service{ s := &Service{
cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true}, ctx: t.Context(),
cfg: &Config{UDPPort: 6000, PingInterval: testPingInterval, DisableLivenessCheck: true, DB: db}, // Use high port to reduce conflicts
genesisTime: genesisTime, genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot, genesisValidatorsRoot: genesisValidatorsRoot,
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
custodyInfoSet: custodyInfoSet,
} }
bootListener, err := s.createListener(ipAddr, pkey) bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err) require.NoError(t, err)
@@ -183,19 +195,26 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
var listeners []*listenerWrapper var listeners []*listenerWrapper
for i := 1; i <= 5; i++ { for i := 1; i <= 5; i++ {
port = 3000 + i port := 6000 + i // Use unique high ports for peer discovery
cfg := &Config{ cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()}, Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port), UDPPort: uint(port),
PingInterval: testPingInterval, PingInterval: testPingInterval,
DisableLivenessCheck: true, DisableLivenessCheck: true,
DB: db,
} }
ipAddr, pkey := createAddrAndPrivKey(t) ipAddr, pkey := createAddrAndPrivKey(t)
custodyInfoSetLoop := make(chan struct{})
close(custodyInfoSetLoop)
s = &Service{ s = &Service{
ctx: t.Context(),
cfg: cfg, cfg: cfg,
genesisTime: genesisTime, genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot, genesisValidatorsRoot: genesisValidatorsRoot,
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
custodyInfoSet: custodyInfoSetLoop,
} }
listener, err := s.startDiscoveryV5(ipAddr, pkey) listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node") assert.NoError(t, err, "Could not start discovery for node")
@@ -220,16 +239,6 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
} }
func TestCreateLocalNode(t *testing.T) { func TestCreateLocalNode(t *testing.T) {
params.SetupTestConfigCleanup(t)
// Set the fulu fork epoch to something other than the far future epoch.
initFuluForkEpoch := params.BeaconConfig().FuluForkEpoch
params.BeaconConfig().FuluForkEpoch = 42
defer func() {
params.BeaconConfig().FuluForkEpoch = initFuluForkEpoch
}()
testCases := []struct { testCases := []struct {
name string name string
cfg *Config cfg *Config
@@ -264,11 +273,11 @@ func TestCreateLocalNode(t *testing.T) {
for _, tt := range testCases { for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
// Define ports. // Define ports. Use unique ports since this test validates ENR content.
const ( const (
udpPort = 2000 udpPort = 3100
tcpPort = 3000 tcpPort = 3101
quicPort = 3000 quicPort = 3102
) )
custodyRequirement := params.BeaconConfig().CustodyRequirement custodyRequirement := params.BeaconConfig().CustodyRequirement
@@ -344,13 +353,19 @@ func TestCreateLocalNode(t *testing.T) {
} }
func TestRebootDiscoveryListener(t *testing.T) { func TestRebootDiscoveryListener(t *testing.T) {
port := 1024
ipAddr, pkey := createAddrAndPrivKey(t) ipAddr, pkey := createAddrAndPrivKey(t)
db := testDB.SetupDB(t)
custodyInfoSet := make(chan struct{})
close(custodyInfoSet)
s := &Service{ s := &Service{
ctx: t.Context(),
genesisTime: time.Now(), genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{UDPPort: uint(port)}, cfg: &Config{UDPPort: 0, DB: db}, // Use 0 to let OS assign an available port
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
custodyInfoSet: custodyInfoSet,
} }
createListener := func() (*discover.UDPv5, error) { createListener := func() (*discover.UDPv5, error) {
@@ -379,11 +394,17 @@ func TestRebootDiscoveryListener(t *testing.T) {
func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) { func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
addr := net.ParseIP("invalidIP") addr := net.ParseIP("invalidIP")
_, pkey := createAddrAndPrivKey(t) _, pkey := createAddrAndPrivKey(t)
custodyInfoSet := make(chan struct{})
close(custodyInfoSet)
s := &Service{ s := &Service{
ctx: t.Context(),
genesisTime: time.Now(), genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{}, cfg: &Config{},
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
custodyInfoSet: custodyInfoSet,
} }
node, err := s.createLocalNode(pkey, addr, 0, 0, 0) node, err := s.createLocalNode(pkey, addr, 0, 0, 0)
require.NoError(t, err) require.NoError(t, err)
@@ -394,15 +415,23 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
func TestMultiAddrConversion_OK(t *testing.T) { func TestMultiAddrConversion_OK(t *testing.T) {
hook := logTest.NewGlobal() hook := logTest.NewGlobal()
ipAddr, pkey := createAddrAndPrivKey(t) ipAddr, pkey := createAddrAndPrivKey(t)
db := testDB.SetupDB(t)
custodyInfoSet := make(chan struct{})
close(custodyInfoSet)
s := &Service{ s := &Service{
ctx: t.Context(),
cfg: &Config{ cfg: &Config{
UDPPort: 2000, UDPPort: 0, // Use 0 to let OS assign an available port
TCPPort: 3000, TCPPort: 0,
QUICPort: 3000, QUICPort: 0,
DB: db,
}, },
genesisTime: time.Now(), genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
custodyInfoSet: custodyInfoSet,
} }
listener, err := s.createListener(ipAddr, pkey) listener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err) require.NoError(t, err)
@@ -472,13 +501,20 @@ func TestHostIsResolved(t *testing.T) {
"2001:4860:4860::8844": true, "2001:4860:4860::8844": true,
} }
db := testDB.SetupDB(t)
custodyInfoSet := make(chan struct{})
close(custodyInfoSet)
s := &Service{ s := &Service{
ctx: t.Context(),
cfg: &Config{ cfg: &Config{
HostDNS: host, HostDNS: host,
DB: db,
}, },
genesisTime: time.Now(), genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
custodyInfoSet: custodyInfoSet,
} }
ip, key := createAddrAndPrivKey(t) ip, key := createAddrAndPrivKey(t)
list, err := s.createListener(ip, key) list, err := s.createListener(ip, key)
@@ -540,15 +576,21 @@ func TestOutboundPeerThreshold(t *testing.T) {
} }
func TestUDPMultiAddress(t *testing.T) { func TestUDPMultiAddress(t *testing.T) {
port := 6500
ipAddr, pkey := createAddrAndPrivKey(t) ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now() genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32) genesisValidatorsRoot := make([]byte, 32)
db := testDB.SetupDB(t)
custodyInfoSet := make(chan struct{})
close(custodyInfoSet)
s := &Service{ s := &Service{
cfg: &Config{UDPPort: uint(port)}, ctx: t.Context(),
cfg: &Config{UDPPort: 2500, DB: db},
genesisTime: genesisTime, genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot, genesisValidatorsRoot: genesisValidatorsRoot,
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
custodyInfoSet: custodyInfoSet,
} }
createListener := func() (*discover.UDPv5, error) { createListener := func() (*discover.UDPv5, error) {
@@ -562,7 +604,7 @@ func TestUDPMultiAddress(t *testing.T) {
multiAddresses, err := s.DiscoveryAddresses() multiAddresses, err := s.DiscoveryAddresses()
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, true, len(multiAddresses) > 0) require.Equal(t, true, len(multiAddresses) > 0)
assert.Equal(t, true, strings.Contains(multiAddresses[0].String(), fmt.Sprintf("%d", port))) assert.Equal(t, true, strings.Contains(multiAddresses[0].String(), fmt.Sprintf("%d", 2500)))
assert.Equal(t, true, strings.Contains(multiAddresses[0].String(), "udp")) assert.Equal(t, true, strings.Contains(multiAddresses[0].String(), "udp"))
} }
@@ -912,7 +954,7 @@ func TestRefreshPersistentSubnets(t *testing.T) {
actualPingCount++ actualPingCount++
return nil return nil
}, },
cfg: &Config{UDPPort: 2000, DB: testDB.SetupDB(t)}, cfg: &Config{UDPPort: 0, DB: testDB.SetupDB(t)}, // Use 0 to let OS assign an available port
peers: p2p.Peers(), peers: p2p.Peers(),
genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second), genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),

View File

@@ -58,14 +58,13 @@ func TestService_Stop_DontPanicIfDv5ListenerIsNotInited(t *testing.T) {
} }
func TestService_Start_OnlyStartsOnce(t *testing.T) { func TestService_Start_OnlyStartsOnce(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal() hook := logTest.NewGlobal()
cs := startup.NewClockSynchronizer() cs := startup.NewClockSynchronizer()
cfg := &Config{ cfg := &Config{
UDPPort: 2000, UDPPort: 0, // Use 0 to let OS assign an available port
TCPPort: 3000, TCPPort: 0,
QUICPort: 3000, QUICPort: 0,
ClockWaiter: cs, ClockWaiter: cs,
DB: testDB.SetupDB(t), DB: testDB.SetupDB(t),
} }
@@ -73,6 +72,7 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
s.dv5Listener = testp2p.NewMockListener(nil, nil) s.dv5Listener = testp2p.NewMockListener(nil, nil)
s.custodyInfo = &custodyInfo{} s.custodyInfo = &custodyInfo{}
close(s.custodyInfoSet)
exitRoutine := make(chan bool) exitRoutine := make(chan bool)
go func() { go func() {
s.Start() s.Start()
@@ -111,9 +111,9 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {
cs := startup.NewClockSynchronizer() cs := startup.NewClockSynchronizer()
cfg := &Config{ cfg := &Config{
UDPPort: 2000, UDPPort: 0, // Use 0 to let OS assign an available port
TCPPort: 3000, TCPPort: 0,
QUICPort: 3000, QUICPort: 0,
StateNotifier: &mock.MockStateNotifier{}, StateNotifier: &mock.MockStateNotifier{},
NoDiscovery: true, // <-- no s.dv5Listener is created NoDiscovery: true, // <-- no s.dv5Listener is created
ClockWaiter: cs, ClockWaiter: cs,
@@ -147,12 +147,11 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {
func TestListenForNewNodes(t *testing.T) { func TestListenForNewNodes(t *testing.T) {
const ( const (
port = uint(2000) bootPort = uint(2200) // Use specific port for bootnode ENR
testPollingPeriod = 1 * time.Second testPollingPeriod = 1 * time.Second
peerCount = 5 peerCount = 5
) )
params.SetupTestConfigCleanup(t)
db := testDB.SetupDB(t) db := testDB.SetupDB(t)
// Setup bootnode. // Setup bootnode.
@@ -160,7 +159,7 @@ func TestListenForNewNodes(t *testing.T) {
StateNotifier: &mock.MockStateNotifier{}, StateNotifier: &mock.MockStateNotifier{},
PingInterval: testPingInterval, PingInterval: testPingInterval,
DisableLivenessCheck: true, DisableLivenessCheck: true,
UDPPort: port, UDPPort: bootPort,
DB: db, DB: db,
} }
@@ -171,10 +170,13 @@ func TestListenForNewNodes(t *testing.T) {
s := &Service{ s := &Service{
cfg: cfg, cfg: cfg,
ctx: t.Context(),
genesisTime: genesisTime, genesisTime: genesisTime,
genesisValidatorsRoot: gvr[:], genesisValidatorsRoot: gvr[:],
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
custodyInfoSet: make(chan struct{}),
} }
close(s.custodyInfoSet)
bootListener, err := s.createListener(ipAddr, pkey) bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err) require.NoError(t, err)
@@ -199,25 +201,29 @@ func TestListenForNewNodes(t *testing.T) {
hosts := make([]host.Host, 0, peerCount) hosts := make([]host.Host, 0, peerCount)
for i := uint(1); i <= peerCount; i++ { for i := uint(1); i <= peerCount; i++ {
peerPort := bootPort + i
cfg = &Config{ cfg = &Config{
Discv5BootStrapAddrs: []string{bootNode.String()}, Discv5BootStrapAddrs: []string{bootNode.String()},
PingInterval: testPingInterval, PingInterval: testPingInterval,
DisableLivenessCheck: true, DisableLivenessCheck: true,
MaxPeers: peerCount, MaxPeers: peerCount,
ClockWaiter: cs, ClockWaiter: cs,
UDPPort: port + i, UDPPort: peerPort,
TCPPort: port + i, TCPPort: peerPort,
DB: db, DB: db,
} }
h, pkey, ipAddr := createHost(t, port+i) h, pkey, ipAddr := createHost(t, peerPort)
s := &Service{ s := &Service{
cfg: cfg, cfg: cfg,
ctx: t.Context(),
genesisTime: genesisTime, genesisTime: genesisTime,
genesisValidatorsRoot: gvr[:], genesisValidatorsRoot: gvr[:],
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
custodyInfoSet: make(chan struct{}),
} }
close(s.custodyInfoSet)
listener, err := s.startDiscoveryV5(ipAddr, pkey) listener, err := s.startDiscoveryV5(ipAddr, pkey)
require.NoError(t, err, "Could not start discovery for node") require.NoError(t, err, "Could not start discovery for node")
@@ -247,6 +253,7 @@ func TestListenForNewNodes(t *testing.T) {
s, err = NewService(t.Context(), cfg) s, err = NewService(t.Context(), cfg)
require.NoError(t, err) require.NoError(t, err)
s.custodyInfo = &custodyInfo{} s.custodyInfo = &custodyInfo{}
close(s.custodyInfoSet)
go s.Start() go s.Start()
@@ -270,7 +277,6 @@ func TestListenForNewNodes(t *testing.T) {
} }
func TestPeer_Disconnect(t *testing.T) { func TestPeer_Disconnect(t *testing.T) {
params.SetupTestConfigCleanup(t)
h1, _, _ := createHost(t, 5000) h1, _, _ := createHost(t, 5000)
defer func() { defer func() {
if err := h1.Close(); err != nil { if err := h1.Close(); err != nil {

View File

@@ -69,10 +69,13 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
bootNodeService := &Service{ bootNodeService := &Service{
cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000, DisableLivenessCheck: true, PingInterval: testPingInterval}, cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000, DisableLivenessCheck: true, PingInterval: testPingInterval},
ctx: ctx,
genesisTime: genesisTime, genesisTime: genesisTime,
genesisValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot[:], genesisValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot[:],
custodyInfo: &custodyInfo{}, custodyInfo: &custodyInfo{},
custodyInfoSet: make(chan struct{}),
} }
close(bootNodeService.custodyInfoSet)
bootNodeForkDigest, err := bootNodeService.currentForkDigest() bootNodeForkDigest, err := bootNodeService.currentForkDigest()
require.NoError(t, err) require.NoError(t, err)
@@ -102,6 +105,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
PingInterval: testPingInterval, PingInterval: testPingInterval,
DisableLivenessCheck: true, DisableLivenessCheck: true,
DB: db, DB: db,
DataDir: t.TempDir(), // Unique data dir for each peer
}) })
require.NoError(t, err) require.NoError(t, err)
@@ -109,6 +113,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
service.genesisTime = genesisTime service.genesisTime = genesisTime
service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:] service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:]
service.custodyInfo = &custodyInfo{} service.custodyInfo = &custodyInfo{}
close(service.custodyInfoSet)
nodeForkDigest, err := service.currentForkDigest() nodeForkDigest, err := service.currentForkDigest()
require.NoError(t, err) require.NoError(t, err)
@@ -152,6 +157,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
TCPPort: 3010, TCPPort: 3010,
QUICPort: 3010, QUICPort: 3010,
DB: db, DB: db,
DataDir: t.TempDir(), // Unique data dir for test service
} }
service, err := NewService(t.Context(), cfg) service, err := NewService(t.Context(), cfg)
@@ -160,6 +166,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
service.genesisTime = genesisTime service.genesisTime = genesisTime
service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:] service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:]
service.custodyInfo = &custodyInfo{} service.custodyInfo = &custodyInfo{}
close(service.custodyInfoSet)
service.Start() service.Start()
defer func() { defer func() {

View File

@@ -300,6 +300,7 @@ func TestListAttestationsV2(t *testing.T) {
params.SetupTestConfigCleanup(t) params.SetupTestConfigCleanup(t)
config := params.BeaconConfig() config := params.BeaconConfig()
config.ElectraForkEpoch = 0 config.ElectraForkEpoch = 0
config.FuluForkEpoch = config.FarFutureEpoch
params.OverrideBeaconConfig(config) params.OverrideBeaconConfig(config)
chainService := &blockchainmock.ChainService{State: bs} chainService := &blockchainmock.ChainService{State: bs}
@@ -357,6 +358,12 @@ func TestListAttestationsV2(t *testing.T) {
writer := httptest.NewRecorder() writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{} writer.Body = &bytes.Buffer{}
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.ElectraForkEpoch = 0
config.FuluForkEpoch = config.FarFutureEpoch
params.OverrideBeaconConfig(config)
s.ListAttestationsV2(writer, request) s.ListAttestationsV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code) assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.ListAttestationsResponse{} resp := &structs.ListAttestationsResponse{}
@@ -394,6 +401,186 @@ func TestListAttestationsV2(t *testing.T) {
assert.Equal(t, "0x0400000000000000", a.CommitteeBits) assert.Equal(t, "0x0400000000000000", a.CommitteeBits)
} }
}) })
t.Run("Post-Fulu", func(t *testing.T) {
cb1 := primitives.NewAttestationCommitteeBits()
cb1.SetBitAt(1, true)
cb2 := primitives.NewAttestationCommitteeBits()
cb2.SetBitAt(2, true)
attFulu1 := &ethpbv1alpha1.AttestationElectra{
AggregationBits: []byte{1, 10},
Data: &ethpbv1alpha1.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
CommitteeBits: cb1,
Signature: bytesutil.PadTo([]byte("signature1"), 96),
}
attFulu2 := &ethpbv1alpha1.AttestationElectra{
AggregationBits: []byte{1, 10},
Data: &ethpbv1alpha1.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
CommitteeBits: cb2,
Signature: bytesutil.PadTo([]byte("signature2"), 96),
}
attFulu3 := &ethpbv1alpha1.AttestationElectra{
AggregationBits: bitfield.NewBitlist(8),
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot3"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot3"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot3"), 32),
},
},
CommitteeBits: cb1,
Signature: bytesutil.PadTo([]byte("signature3"), 96),
}
attFulu4 := &ethpbv1alpha1.AttestationElectra{
AggregationBits: bitfield.NewBitlist(8),
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot4"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot4"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot4"), 32),
},
},
CommitteeBits: cb2,
Signature: bytesutil.PadTo([]byte("signature4"), 96),
}
bs, err := util.NewBeaconStateFulu()
require.NoError(t, err)
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.ElectraForkEpoch = 0
config.FuluForkEpoch = 0
params.OverrideBeaconConfig(config)
chainService := &blockchainmock.ChainService{State: bs}
s := &Server{
AttestationsPool: attestations.NewPool(),
ChainInfoFetcher: chainService,
TimeFetcher: chainService,
}
// Added one pre electra attestation to ensure it is ignored.
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{attFulu1, attFulu2, att1}))
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{attFulu3, attFulu4, att3}))
t.Run("empty request", func(t *testing.T) {
url := "http://example.com"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ListAttestationsV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
var atts []*structs.AttestationElectra
require.NoError(t, json.Unmarshal(resp.Data, &atts))
assert.Equal(t, 4, len(atts))
assert.Equal(t, "fulu", resp.Version)
})
t.Run("slot request", func(t *testing.T) {
url := "http://example.com?slot=2"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ListAttestationsV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
var atts []*structs.AttestationElectra
require.NoError(t, json.Unmarshal(resp.Data, &atts))
assert.Equal(t, 2, len(atts))
assert.Equal(t, "fulu", resp.Version)
for _, a := range atts {
assert.Equal(t, "2", a.Data.Slot)
}
})
t.Run("index request", func(t *testing.T) {
url := "http://example.com?committee_index=2"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ListAttestationsV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
var atts []*structs.AttestationElectra
require.NoError(t, json.Unmarshal(resp.Data, &atts))
assert.Equal(t, 2, len(atts))
assert.Equal(t, "fulu", resp.Version)
for _, a := range atts {
assert.Equal(t, "0x0400000000000000", a.CommitteeBits)
}
})
t.Run("both slot + index request", func(t *testing.T) {
url := "http://example.com?slot=2&committee_index=2"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.ListAttestationsV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.ListAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
var atts []*structs.AttestationElectra
require.NoError(t, json.Unmarshal(resp.Data, &atts))
assert.Equal(t, 1, len(atts))
assert.Equal(t, "fulu", resp.Version)
for _, a := range atts {
assert.Equal(t, "2", a.Data.Slot)
assert.Equal(t, "0x0400000000000000", a.CommitteeBits)
}
})
})
}) })
} }
@@ -494,6 +681,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
params.SetupTestConfigCleanup(t) params.SetupTestConfigCleanup(t)
config := params.BeaconConfig() config := params.BeaconConfig()
config.ElectraForkEpoch = 0 config.ElectraForkEpoch = 0
config.FuluForkEpoch = config.FarFutureEpoch
params.OverrideBeaconConfig(config) params.OverrideBeaconConfig(config)
var body bytes.Buffer var body bytes.Buffer
@@ -574,6 +762,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, true, strings.Contains(e.Failures[0].Message, "Incorrect attestation signature")) assert.Equal(t, true, strings.Contains(e.Failures[0].Message, "Incorrect attestation signature"))
}) })
}) })
t.Run("post-electra", func(t *testing.T) { t.Run("post-electra", func(t *testing.T) {
params.SetupTestConfigCleanup(t) params.SetupTestConfigCleanup(t)
config := params.BeaconConfig() config := params.BeaconConfig()
@@ -1462,6 +1651,7 @@ func TestGetAttesterSlashingsV2(t *testing.T) {
params.SetupTestConfigCleanup(t) params.SetupTestConfigCleanup(t)
config := params.BeaconConfig() config := params.BeaconConfig()
config.ElectraForkEpoch = 100 config.ElectraForkEpoch = 100
config.FuluForkEpoch = config.FarFutureEpoch
params.OverrideBeaconConfig(config) params.OverrideBeaconConfig(config)
chainService := &blockchainmock.ChainService{State: bs} chainService := &blockchainmock.ChainService{State: bs}
@@ -1493,6 +1683,7 @@ func TestGetAttesterSlashingsV2(t *testing.T) {
require.DeepEqual(t, slashingPostElectra, ss[0]) require.DeepEqual(t, slashingPostElectra, ss[0])
}) })
t.Run("post-electra-ok", func(t *testing.T) { t.Run("post-electra-ok", func(t *testing.T) {
bs, err := util.NewBeaconStateElectra() bs, err := util.NewBeaconStateElectra()
require.NoError(t, err) require.NoError(t, err)
@@ -1500,6 +1691,7 @@ func TestGetAttesterSlashingsV2(t *testing.T) {
params.SetupTestConfigCleanup(t) params.SetupTestConfigCleanup(t)
config := params.BeaconConfig() config := params.BeaconConfig()
config.ElectraForkEpoch = 100 config.ElectraForkEpoch = 100
config.FuluForkEpoch = config.FarFutureEpoch
params.OverrideBeaconConfig(config) params.OverrideBeaconConfig(config)
chainService := &blockchainmock.ChainService{State: bs} chainService := &blockchainmock.ChainService{State: bs}
@@ -1570,6 +1762,7 @@ func TestGetAttesterSlashingsV2(t *testing.T) {
params.SetupTestConfigCleanup(t) params.SetupTestConfigCleanup(t)
config := params.BeaconConfig() config := params.BeaconConfig()
config.ElectraForkEpoch = 100 config.ElectraForkEpoch = 100
config.FuluForkEpoch = config.FarFutureEpoch
params.OverrideBeaconConfig(config) params.OverrideBeaconConfig(config)
chainService := &blockchainmock.ChainService{State: bs} chainService := &blockchainmock.ChainService{State: bs}
@@ -1596,6 +1789,83 @@ func TestGetAttesterSlashingsV2(t *testing.T) {
require.NoError(t, json.Unmarshal(resp.Data, &slashings)) require.NoError(t, json.Unmarshal(resp.Data, &slashings))
require.NotNil(t, slashings) require.NotNil(t, slashings)
require.Equal(t, 0, len(slashings)) require.Equal(t, 0, len(slashings))
t.Run("Post-Fulu", func(t *testing.T) {
t.Run("post-fulu-ok", func(t *testing.T) {
bs, err := util.NewBeaconStateFulu()
require.NoError(t, err)
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.ElectraForkEpoch = 0
config.FuluForkEpoch = 0
params.OverrideBeaconConfig(config)
chainService := &blockchainmock.ChainService{State: bs}
s := &Server{
ChainInfoFetcher: chainService,
TimeFetcher: chainService,
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashingPostElectra}},
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/beacon/pool/attester_slashings", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetAttesterSlashingsV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetAttesterSlashingsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
assert.Equal(t, "fulu", resp.Version)
// Unmarshal resp.Data into a slice of slashings
var slashings []*structs.AttesterSlashingElectra
require.NoError(t, json.Unmarshal(resp.Data, &slashings))
ss, err := structs.AttesterSlashingsElectraToConsensus(slashings)
require.NoError(t, err)
require.DeepEqual(t, slashingPostElectra, ss[0])
})
t.Run("no-slashings", func(t *testing.T) {
bs, err := util.NewBeaconStateFulu()
require.NoError(t, err)
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.ElectraForkEpoch = 0
config.FuluForkEpoch = 0
params.OverrideBeaconConfig(config)
chainService := &blockchainmock.ChainService{State: bs}
s := &Server{
ChainInfoFetcher: chainService,
TimeFetcher: chainService,
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{}},
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/beacon/pool/attester_slashings", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetAttesterSlashingsV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetAttesterSlashingsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
assert.Equal(t, "fulu", resp.Version)
// Unmarshal resp.Data into a slice of slashings
var slashings []*structs.AttesterSlashingElectra
require.NoError(t, json.Unmarshal(resp.Data, &slashings))
require.NotNil(t, slashings)
require.Equal(t, 0, len(slashings))
})
})
}) })
} }

View File

@@ -568,10 +568,9 @@ func TestGetSpec(t *testing.T) {
case "SYNC_MESSAGE_DUE_BPS": case "SYNC_MESSAGE_DUE_BPS":
assert.Equal(t, "104", v) assert.Equal(t, "104", v)
case "BLOB_SCHEDULE": case "BLOB_SCHEDULE":
// BLOB_SCHEDULE should be an empty slice when no schedule is defined
blobSchedule, ok := v.([]interface{}) blobSchedule, ok := v.([]interface{})
assert.Equal(t, true, ok) assert.Equal(t, true, ok)
assert.Equal(t, 0, len(blobSchedule)) assert.Equal(t, 2, len(blobSchedule))
default: default:
t.Errorf("Incorrect key: %s", k) t.Errorf("Incorrect key: %s", k)
} }

View File

@@ -0,0 +1,8 @@
### Added
- Fulu fork epoch for mainnet configurations set for December 3, 2025, 09:49:11pm UTC
- BPO schedules for December 9, 2025, 02:21:11pm UTC and January 7, 2026, 01:01:11am UTC
### Changed
- Updated consensus spec from 1.6.0-beta.2 to 1.6.0

View File

@@ -130,10 +130,10 @@ func TestNextForkData(t *testing.T) {
wantedEpoch: cfg.BellatrixForkEpoch, wantedEpoch: cfg.BellatrixForkEpoch,
}, },
{ {
name: "after last bpo - should be far future epoch and 0x00000000", name: "post last full fork, fulu bpo 1",
currEpoch: params.LastForkEpoch() + 1, currEpoch: params.LastForkEpoch() + 1,
wantedForkVersion: [4]byte(cfg.ElectraForkVersion), wantedForkVersion: [4]byte(cfg.FuluForkVersion),
wantedEpoch: cfg.ElectraForkEpoch, wantedEpoch: cfg.BlobSchedule[0].Epoch,
}, },
} }
for _, tt := range tests { for _, tt := range tests {

View File

@@ -30,7 +30,7 @@ const (
// Electra Fork Epoch for mainnet config // Electra Fork Epoch for mainnet config
mainnetElectraForkEpoch = 364032 // May 7, 2025, 10:05:11 UTC mainnetElectraForkEpoch = 364032 // May 7, 2025, 10:05:11 UTC
// Fulu Fork Epoch for mainnet config // Fulu Fork Epoch for mainnet config
mainnetFuluForkEpoch = math.MaxUint64 // Far future / to be defined mainnetFuluForkEpoch = 411392 // December 3, 2025, 09:49:11pm UTC
) )
var mainnetNetworkConfig = &NetworkConfig{ var mainnetNetworkConfig = &NetworkConfig{
@@ -338,7 +338,16 @@ var mainnetBeaconConfig = &BeaconChainConfig{
SubnetsPerNode: 2, SubnetsPerNode: 2,
NodeIdBits: 256, NodeIdBits: 256,
BlobSchedule: []BlobScheduleEntry{}, BlobSchedule: []BlobScheduleEntry{
{
Epoch: 412672, // December 9, 2025, 02:21:11pm UTC
MaxBlobsPerBlock: 15,
},
{
Epoch: 419072, // January 7, 2026, 01:01:11am UTC
MaxBlobsPerBlock: 21,
},
},
} }
// MainnetTestConfig provides a version of the mainnet config that has a different name // MainnetTestConfig provides a version of the mainnet config that has a different name

View File

@@ -1,4 +1,4 @@
version: v1.6.0-beta.2 version: v1.6.0
style: full style: full
specrefs: specrefs:

View File

@@ -108,8 +108,16 @@
search: BlobSchedule\s+\[]BlobScheduleEntry search: BlobSchedule\s+\[]BlobScheduleEntry
regex: true regex: true
spec: | spec: |
<spec config_var="BLOB_SCHEDULE" fork="fulu" hash="f3f1064a"> <spec config_var="BLOB_SCHEDULE" fork="fulu" hash="07879110">
BLOB_SCHEDULE: tuple[frozendict[str, Any], ...] = ( BLOB_SCHEDULE: tuple[frozendict[str, Any], ...] = (
frozendict({
"EPOCH": 412672,
"MAX_BLOBS_PER_BLOCK": 15,
}),
frozendict({
"EPOCH": 419072,
"MAX_BLOBS_PER_BLOCK": 21,
}),
) )
</spec> </spec>
@@ -266,8 +274,8 @@
search: FuluForkEpoch\s+primitives.Epoch search: FuluForkEpoch\s+primitives.Epoch
regex: true regex: true
spec: | spec: |
<spec config_var="FULU_FORK_EPOCH" fork="fulu" hash="673334be"> <spec config_var="FULU_FORK_EPOCH" fork="fulu" hash="af10fa3c">
FULU_FORK_EPOCH: Epoch = 18446744073709551615 FULU_FORK_EPOCH: Epoch = 411392
</spec> </spec>
- name: FULU_FORK_VERSION - name: FULU_FORK_VERSION