Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 05:47:59 -05:00.

Compare commits: `use-depend...electra-wi` (26 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 5613fc832b | |
| | b4a66a0993 | |
| | d38064f181 | |
| | 7f89bb3c6f | |
| | a69c033e35 | |
| | 6faa112197 | |
| | a107802a9a | |
| | 35151c7bc8 | |
| | c07479b99a | |
| | 0d3b7f0ade | |
| | dd9a5fba59 | |
| | 7da7019a20 | |
| | 24cf930952 | |
| | 97a95dddfc | |
| | 6df476835c | |
| | 204302a821 | |
| | 8a22df902f | |
| | a0ec495086 | |
| | 5e3a5b877a | |
| | b69c71d65a | |
| | 1298dc3a46 | |
| | 3c463d8171 | |
| | 0a48fafc71 | |
| | 28cd59c9b7 | |
| | 0f3c11d622 | |
| | e3d05bc880 | |
WORKSPACE (10 changes)
```diff
@@ -255,7 +255,7 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )
 
-consensus_spec_version = "v1.5.0-beta.5"
+consensus_spec_version = "v1.5.0"
 
 bls_test_version = "v0.1.1"
@@ -271,7 +271,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-H+Pt4z+HCVDnEBAv814yvsjR7f5l1IpumjFoTj2XnLE=",
+    integrity = "sha256-JljxS/if/t0qvGWcf5CgsX+72fj90yGTg/uEgC56y7U=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )
@@ -287,7 +287,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-Dqiwf5BG7yYyURGf+i87AIdArAyztvcgjoi2kSxrGvo=",
+    integrity = "sha256-NRba2h4zqb2LAXyDPglHTtkT4gVyuwpY708XmwXKXV8=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )
@@ -303,7 +303,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-xrmsFF243pzXHAjh1EQYKS9gtcwmtHK3wRZDSLlVVRk=",
+    integrity = "sha256-hpbtKUbc3NHtVcUPk/Zm+Hn57G2ijI9qvXJwl9hc/tM=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )
@@ -318,7 +318,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-c+gGapqifCvFtmtxfhOwieBDO2Syxp13GECWEpWM/Ho=",
+    integrity = "sha256-Wy3YcJxoXiKQwrGgJecrtjtdokc4X/VUNBmyQXJf0Oc=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
```
```diff
@@ -57,6 +57,7 @@ go_test(
     deps = [
         "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
+        "//testing/assert:go_default_library",
         "//testing/require:go_default_library",
         "@com_github_ethereum_go_ethereum//common:go_default_library",
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
```
```diff
@@ -26,10 +26,7 @@ func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error)
 	for i, r := range srcSr {
 		sr[i] = hexutil.Encode(r)
 	}
-	srcHr, err := st.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
+	srcHr := st.HistoricalRoots()
 	hr := make([]string, len(srcHr))
 	for i, r := range srcHr {
 		hr[i] = hexutil.Encode(r)
@@ -116,10 +113,7 @@ func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAlt
 	for i, r := range srcSr {
 		sr[i] = hexutil.Encode(r)
 	}
-	srcHr, err := st.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
+	srcHr := st.HistoricalRoots()
 	hr := make([]string, len(srcHr))
 	for i, r := range srcHr {
 		hr[i] = hexutil.Encode(r)
@@ -225,10 +219,7 @@ func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconState
 	for i, r := range srcSr {
 		sr[i] = hexutil.Encode(r)
 	}
-	srcHr, err := st.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
+	srcHr := st.HistoricalRoots()
 	hr := make([]string, len(srcHr))
 	for i, r := range srcHr {
 		hr[i] = hexutil.Encode(r)
@@ -347,10 +338,7 @@ func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCa
 	for i, r := range srcSr {
 		sr[i] = hexutil.Encode(r)
 	}
-	srcHr, err := st.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
+	srcHr := st.HistoricalRoots()
 	hr := make([]string, len(srcHr))
 	for i, r := range srcHr {
 		hr[i] = hexutil.Encode(r)
@@ -488,10 +476,7 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
 	for i, r := range srcSr {
 		sr[i] = hexutil.Encode(r)
 	}
-	srcHr, err := st.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
+	srcHr := st.HistoricalRoots()
 	hr := make([]string, len(srcHr))
 	for i, r := range srcHr {
 		hr[i] = hexutil.Encode(r)
@@ -629,10 +614,7 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
 	for i, r := range srcSr {
 		sr[i] = hexutil.Encode(r)
 	}
-	srcHr, err := st.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
+	srcHr := st.HistoricalRoots()
 	hr := make([]string, len(srcHr))
 	for i, r := range srcHr {
 		hr[i] = hexutil.Encode(r)
@@ -815,10 +797,7 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
 	for i, r := range srcSr {
 		sr[i] = hexutil.Encode(r)
 	}
-	srcHr, err := st.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
+	srcHr := st.HistoricalRoots()
 	hr := make([]string, len(srcHr))
 	for i, r := range srcHr {
 		hr[i] = hexutil.Encode(r)
```
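Every `*FromConsensus` hunk above is the same mechanical migration: the beacon state's `HistoricalRoots()` accessor no longer returns an error, so the three-line error check collapses into a plain assignment. A standalone sketch of the surviving pattern (the helper name is ours, not Prysm's):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// historicalRootsToHex mirrors the loop used throughout the diff: take
// the raw roots returned by the now error-free HistoricalRoots() and
// render them as 0x-prefixed hex strings for the API types.
func historicalRootsToHex(srcHr [][]byte) []string {
	hr := make([]string, len(srcHr))
	for i, r := range srcHr {
		hr[i] = hexutil.Encode(r)
	}
	return hr
}

func main() {
	roots := [][]byte{{0x01}, {0x02, 0x03}}
	fmt.Println(historicalRootsToHex(roots)) // [0x01 0x0203]
}
```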
```diff
@@ -4,7 +4,9 @@ import (
 	"testing"
 
 	eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
+	"github.com/OffchainLabs/prysm/v6/testing/assert"
 	"github.com/OffchainLabs/prysm/v6/testing/require"
+	"github.com/ethereum/go-ethereum/common/hexutil"
 )
 
 func TestDepositSnapshotFromConsensus(t *testing.T) {
```
```diff
@@ -102,12 +104,266 @@ func TestProposerSlashing_ToConsensus(t *testing.T) {
 	require.ErrorContains(t, errNilValue.Error(), err)
 }
 
+func TestProposerSlashing_FromConsensus(t *testing.T) {
+	input := []*eth.ProposerSlashing{
+		{
+			Header_1: &eth.SignedBeaconBlockHeader{
+				Header: &eth.BeaconBlockHeader{
+					Slot:          1,
+					ProposerIndex: 2,
+					ParentRoot:    []byte{3},
+					StateRoot:     []byte{4},
+					BodyRoot:      []byte{5},
+				},
+				Signature: []byte{6},
+			},
+			Header_2: &eth.SignedBeaconBlockHeader{
+				Header: &eth.BeaconBlockHeader{
+					Slot:          7,
+					ProposerIndex: 8,
+					ParentRoot:    []byte{9},
+					StateRoot:     []byte{10},
+					BodyRoot:      []byte{11},
+				},
+				Signature: []byte{12},
+			},
+		},
+		{
+			Header_1: &eth.SignedBeaconBlockHeader{
+				Header: &eth.BeaconBlockHeader{
+					Slot:          13,
+					ProposerIndex: 14,
+					ParentRoot:    []byte{15},
+					StateRoot:     []byte{16},
+					BodyRoot:      []byte{17},
+				},
+				Signature: []byte{18},
+			},
+			Header_2: &eth.SignedBeaconBlockHeader{
+				Header: &eth.BeaconBlockHeader{
+					Slot:          19,
+					ProposerIndex: 20,
+					ParentRoot:    []byte{21},
+					StateRoot:     []byte{22},
+					BodyRoot:      []byte{23},
+				},
+				Signature: []byte{24},
+			},
+		},
+	}
+
+	expectedResult := []*ProposerSlashing{
+		{
+			SignedHeader1: &SignedBeaconBlockHeader{
+				Message: &BeaconBlockHeader{
+					Slot:          "1",
+					ProposerIndex: "2",
+					ParentRoot:    hexutil.Encode([]byte{3}),
+					StateRoot:     hexutil.Encode([]byte{4}),
+					BodyRoot:      hexutil.Encode([]byte{5}),
+				},
+				Signature: hexutil.Encode([]byte{6}),
+			},
+			SignedHeader2: &SignedBeaconBlockHeader{
+				Message: &BeaconBlockHeader{
+					Slot:          "7",
+					ProposerIndex: "8",
+					ParentRoot:    hexutil.Encode([]byte{9}),
+					StateRoot:     hexutil.Encode([]byte{10}),
+					BodyRoot:      hexutil.Encode([]byte{11}),
+				},
+				Signature: hexutil.Encode([]byte{12}),
+			},
+		},
+		{
+			SignedHeader1: &SignedBeaconBlockHeader{
+				Message: &BeaconBlockHeader{
+					Slot:          "13",
+					ProposerIndex: "14",
+					ParentRoot:    hexutil.Encode([]byte{15}),
+					StateRoot:     hexutil.Encode([]byte{16}),
+					BodyRoot:      hexutil.Encode([]byte{17}),
+				},
+				Signature: hexutil.Encode([]byte{18}),
+			},
+			SignedHeader2: &SignedBeaconBlockHeader{
+				Message: &BeaconBlockHeader{
+					Slot:          "19",
+					ProposerIndex: "20",
+					ParentRoot:    hexutil.Encode([]byte{21}),
+					StateRoot:     hexutil.Encode([]byte{22}),
+					BodyRoot:      hexutil.Encode([]byte{23}),
+				},
+				Signature: hexutil.Encode([]byte{24}),
+			},
+		},
+	}
+
+	result := ProposerSlashingsFromConsensus(input)
+	assert.DeepEqual(t, expectedResult, result)
+}
+
 func TestAttesterSlashing_ToConsensus(t *testing.T) {
 	a := &AttesterSlashing{Attestation1: nil, Attestation2: nil}
 	_, err := a.ToConsensus()
 	require.ErrorContains(t, errNilValue.Error(), err)
 }
 
+func TestAttesterSlashing_FromConsensus(t *testing.T) {
+	input := []*eth.AttesterSlashing{
+		{
+			Attestation_1: &eth.IndexedAttestation{
+				AttestingIndices: []uint64{1, 2},
+				Data: &eth.AttestationData{
+					Slot:            3,
+					CommitteeIndex:  4,
+					BeaconBlockRoot: []byte{5},
+					Source: &eth.Checkpoint{
+						Epoch: 6,
+						Root:  []byte{7},
+					},
+					Target: &eth.Checkpoint{
+						Epoch: 8,
+						Root:  []byte{9},
+					},
+				},
+				Signature: []byte{10},
+			},
+			Attestation_2: &eth.IndexedAttestation{
+				AttestingIndices: []uint64{11, 12},
+				Data: &eth.AttestationData{
+					Slot:            13,
+					CommitteeIndex:  14,
+					BeaconBlockRoot: []byte{15},
+					Source: &eth.Checkpoint{
+						Epoch: 16,
+						Root:  []byte{17},
+					},
+					Target: &eth.Checkpoint{
+						Epoch: 18,
+						Root:  []byte{19},
+					},
+				},
+				Signature: []byte{20},
+			},
+		},
+		{
+			Attestation_1: &eth.IndexedAttestation{
+				AttestingIndices: []uint64{21, 22},
+				Data: &eth.AttestationData{
+					Slot:            23,
+					CommitteeIndex:  24,
+					BeaconBlockRoot: []byte{25},
+					Source: &eth.Checkpoint{
+						Epoch: 26,
+						Root:  []byte{27},
+					},
+					Target: &eth.Checkpoint{
+						Epoch: 28,
+						Root:  []byte{29},
+					},
+				},
+				Signature: []byte{30},
+			},
+			Attestation_2: &eth.IndexedAttestation{
+				AttestingIndices: []uint64{31, 32},
+				Data: &eth.AttestationData{
+					Slot:            33,
+					CommitteeIndex:  34,
+					BeaconBlockRoot: []byte{35},
+					Source: &eth.Checkpoint{
+						Epoch: 36,
+						Root:  []byte{37},
+					},
+					Target: &eth.Checkpoint{
+						Epoch: 38,
+						Root:  []byte{39},
+					},
+				},
+				Signature: []byte{40},
+			},
+		},
+	}
+
+	expectedResult := []*AttesterSlashing{
+		{
+			Attestation1: &IndexedAttestation{
+				AttestingIndices: []string{"1", "2"},
+				Data: &AttestationData{
+					Slot:            "3",
+					CommitteeIndex:  "4",
+					BeaconBlockRoot: hexutil.Encode([]byte{5}),
+					Source: &Checkpoint{
+						Epoch: "6",
+						Root:  hexutil.Encode([]byte{7}),
+					},
+					Target: &Checkpoint{
+						Epoch: "8",
+						Root:  hexutil.Encode([]byte{9}),
+					},
+				},
+				Signature: hexutil.Encode([]byte{10}),
+			},
+			Attestation2: &IndexedAttestation{
+				AttestingIndices: []string{"11", "12"},
+				Data: &AttestationData{
+					Slot:            "13",
+					CommitteeIndex:  "14",
+					BeaconBlockRoot: hexutil.Encode([]byte{15}),
+					Source: &Checkpoint{
+						Epoch: "16",
+						Root:  hexutil.Encode([]byte{17}),
+					},
+					Target: &Checkpoint{
+						Epoch: "18",
+						Root:  hexutil.Encode([]byte{19}),
+					},
+				},
+				Signature: hexutil.Encode([]byte{20}),
+			},
+		},
+		{
+			Attestation1: &IndexedAttestation{
+				AttestingIndices: []string{"21", "22"},
+				Data: &AttestationData{
+					Slot:            "23",
+					CommitteeIndex:  "24",
+					BeaconBlockRoot: hexutil.Encode([]byte{25}),
+					Source: &Checkpoint{
+						Epoch: "26",
+						Root:  hexutil.Encode([]byte{27}),
+					},
+					Target: &Checkpoint{
+						Epoch: "28",
+						Root:  hexutil.Encode([]byte{29}),
+					},
+				},
+				Signature: hexutil.Encode([]byte{30}),
+			},
+			Attestation2: &IndexedAttestation{
+				AttestingIndices: []string{"31", "32"},
+				Data: &AttestationData{
+					Slot:            "33",
+					CommitteeIndex:  "34",
+					BeaconBlockRoot: hexutil.Encode([]byte{35}),
+					Source: &Checkpoint{
+						Epoch: "36",
+						Root:  hexutil.Encode([]byte{37}),
+					},
+					Target: &Checkpoint{
+						Epoch: "38",
+						Root:  hexutil.Encode([]byte{39}),
+					},
+				},
+				Signature: hexutil.Encode([]byte{40}),
+			},
+		},
+	}
+
+	result := AttesterSlashingsFromConsensus(input)
+	assert.DeepEqual(t, expectedResult, result)
+}
+
 func TestIndexedAttestation_ToConsensus(t *testing.T) {
 	a := &IndexedAttestation{
 		AttestingIndices: []string{"1"},
```
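The expected values in the new tests follow the Beacon API JSON conventions these structs encode: integers become base-10 strings and byte slices become 0x-prefixed hex. A minimal illustration of the two encodings (plain Go, only `hexutil` from go-ethereum):

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	slot := uint64(19)
	parentRoot := []byte{21}
	fmt.Println(strconv.FormatUint(slot, 10)) // "19"  (numbers as decimal strings)
	fmt.Println(hexutil.Encode(parentRoot))   // "0x15" (bytes as 0x-prefixed hex)
}
```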
```diff
@@ -253,7 +253,7 @@ type PendingDeposit struct {
 }
 
 type PendingPartialWithdrawal struct {
-	Index             string `json:"index"`
+	Index             string `json:"validator_index"`
 	Amount            string `json:"amount"`
 	WithdrawableEpoch string `json:"withdrawable_epoch"`
 }
```
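Only the JSON tag changes here; the Go field keeps its name, so code reading `.Index` compiles unchanged while the wire format switches to the `validator_index` key. A quick check of the new serialization (struct copied from the diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// PendingPartialWithdrawal as in the diff: the field now serializes
// under "validator_index" instead of "index".
type PendingPartialWithdrawal struct {
	Index             string `json:"validator_index"`
	Amount            string `json:"amount"`
	WithdrawableEpoch string `json:"withdrawable_epoch"`
}

func main() {
	w := PendingPartialWithdrawal{Index: "7", Amount: "32000000000", WithdrawableEpoch: "10"}
	b, _ := json.Marshal(w)
	fmt.Println(string(b)) // {"validator_index":"7","amount":"32000000000","withdrawable_epoch":"10"}
}
```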
```diff
@@ -163,6 +163,7 @@ go_test(
         "//beacon-chain/operations/slashings:go_default_library",
         "//beacon-chain/operations/voluntaryexits:go_default_library",
         "//beacon-chain/p2p:go_default_library",
+        "//beacon-chain/p2p/testing:go_default_library",
         "//beacon-chain/startup:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/state-native:go_default_library",
```
```diff
@@ -184,7 +184,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
 	return payloadID, nil
 }
 
-func firePayloadAttributesEvent(f event.SubscriberSender, block interfaces.ReadOnlySignedBeaconBlock, root [32]byte, nextSlot primitives.Slot) {
+func (s *Service) firePayloadAttributesEvent(f event.SubscriberSender, block interfaces.ReadOnlySignedBeaconBlock, root [32]byte, nextSlot primitives.Slot) {
+	// If we're syncing a block in the past and init-sync is still running, we shouldn't fire this event.
+	if !s.cfg.SyncChecker.Synced() {
+		return
+	}
 	// the fcu args have differing amounts of completeness based on the code path,
 	// and there is work we only want to do if a client is actually listening to the events beacon api endpoint.
 	// temporary solution: just fire a blank event and fill in the details in the api handler.
```
```diff
@@ -102,7 +102,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
 		log.WithError(err).Error("could not save head")
 	}
 
-	go firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)
+	go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)
 
 	// Only need to prune attestations from pool if the head has changed.
 	s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
```
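Promoting `firePayloadAttributesEvent` from a free function to a method is what lets it consult the service's sync checker before emitting anything. A self-contained sketch of that gating pattern; the `Checker` interface shape here is an assumption for illustration, the diff only shows the `s.cfg.SyncChecker.Synced()` call:

```go
package main

import "fmt"

// Checker is the minimal sync-status interface the event firer needs.
type Checker interface {
	Synced() bool
}

type service struct{ sync Checker }

type staticChecker struct{ synced bool }

func (c staticChecker) Synced() bool { return c.synced }

// firePayloadAttributesEvent becomes a no-op while initial sync is still
// running, which is the behavioral change the diff introduces.
func (s *service) firePayloadAttributesEvent(slot uint64) {
	if !s.sync.Synced() {
		return // don't advertise payload attributes while replaying old blocks
	}
	fmt.Printf("payload attributes event for slot %d\n", slot)
}

func main() {
	(&service{sync: staticChecker{synced: false}}).firePayloadAttributesEvent(42) // suppressed
	(&service{sync: staticChecker{synced: true}}).firePayloadAttributesEvent(43)  // fires
}
```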
```diff
@@ -30,7 +30,8 @@ go_test(
     deps = [
         "//consensus-types/blocks:go_default_library",
         "//testing/require:go_default_library",
         "//testing/util:go_default_library",
+        "@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
         "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
     ],
 )
```
```diff
@@ -1,12 +1,16 @@
 package kzg
 
 import (
+	"bytes"
+	"crypto/sha256"
+	"encoding/binary"
 	"testing"
 
 	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/testing/require"
 	"github.com/OffchainLabs/prysm/v6/testing/util"
+	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
 	GoKZG "github.com/crate-crypto/go-kzg-4844"
 	"github.com/sirupsen/logrus"
 )
 
 func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZGProof, error) {
```
```diff
@@ -37,7 +41,7 @@ func TestBytesToAny(t *testing.T) {
 }
 
 func TestGenerateCommitmentAndProof(t *testing.T) {
-	blob := util.GetRandBlob(123)
+	blob := getRandBlob(123)
 	commitment, proof, err := GenerateCommitmentAndProof(blob)
 	require.NoError(t, err)
 	expectedCommitment := GoKZG.KZGCommitment{180, 218, 156, 194, 59, 20, 10, 189, 186, 254, 132, 93, 7, 127, 104, 172, 238, 240, 237, 70, 83, 89, 1, 152, 99, 0, 165, 65, 143, 62, 20, 215, 230, 14, 205, 95, 28, 245, 54, 25, 160, 16, 178, 31, 232, 207, 38, 85}
```
```diff
@@ -45,3 +49,36 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
 	require.Equal(t, expectedCommitment, commitment)
 	require.Equal(t, expectedProof, proof)
 }
+
+func deterministicRandomness(seed int64) [32]byte {
+	// Converts an int64 to a byte slice
+	buf := new(bytes.Buffer)
+	err := binary.Write(buf, binary.BigEndian, seed)
+	if err != nil {
+		logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
+		return [32]byte{}
+	}
+	bytes := buf.Bytes()
+
+	return sha256.Sum256(bytes)
+}
+
+// Returns a serialized random field element in big-endian
+func getRandFieldElement(seed int64) [32]byte {
+	bytes := deterministicRandomness(seed)
+	var r fr.Element
+	r.SetBytes(bytes[:])
+
+	return GoKZG.SerializeScalar(r)
+}
+
+// Returns a random blob using the passed seed as entropy
+func getRandBlob(seed int64) GoKZG.Blob {
+	var blob GoKZG.Blob
+	bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
+	for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
+		fieldElementBytes := getRandFieldElement(seed + int64(i))
+		copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
+	}
+	return blob
+}
```
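These helpers trade real entropy for reproducibility: the same seed always yields the same blob, which is what keeps the hard-coded expected commitment and proof in `TestGenerateCommitmentAndProof` stable across runs. A dependency-free check of that property:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// Same seed in, same bytes out: the invariant the test helpers rely on.
func deterministicRandomness(seed int64) [32]byte {
	buf := new(bytes.Buffer)
	if err := binary.Write(buf, binary.BigEndian, seed); err != nil {
		return [32]byte{}
	}
	return sha256.Sum256(buf.Bytes())
}

func main() {
	a := deterministicRandomness(123)
	b := deterministicRandomness(123)
	fmt.Println(a == b) // true: arrays compare by value in Go
}
```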
```diff
@@ -20,6 +20,7 @@ func testServiceOptsWithDB(t *testing.T) []Option {
 		WithForkChoiceStore(fcs),
 		WithClockSynchronizer(cs),
 		WithStateNotifier(&mock.MockStateNotifier{RecordEvents: true}),
+		WithSyncChecker(&mock.MockSyncChecker{}),
 	}
 }
```
```diff
@@ -735,7 +735,7 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	}
 	// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
 	// call notifyForkchoiceUpdate, so the event is fired here.
-	go firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), headBlock, headRoot, s.CurrentSlot()+1)
+	go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), headBlock, headRoot, s.CurrentSlot()+1)
 	return
 }
```
```diff
@@ -103,15 +103,29 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
 		log.WithError(err).Debug("Could not check if block is optimistic")
 		optimistic = true
 	}
+	currEpoch := slots.ToEpoch(s.CurrentSlot())
+	currDependenRoot, err := s.cfg.ForkChoiceStore.DependentRoot(currEpoch)
+	if err != nil {
+		log.WithError(err).Debug("Could not get dependent root")
+	}
+	prevDependentRoot := [32]byte{}
+	if currEpoch > 0 {
+		prevDependentRoot, err = s.cfg.ForkChoiceStore.DependentRoot(currEpoch - 1)
+		if err != nil {
+			log.WithError(err).Debug("Could not get previous dependent root")
+		}
+	}
 	// Send notification of the processed block to the state feed.
 	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
 		Type: statefeed.BlockProcessed,
 		Data: &statefeed.BlockProcessedData{
-			Slot:        cfg.roblock.Block().Slot(),
-			BlockRoot:   cfg.roblock.Root(),
-			SignedBlock: cfg.roblock,
-			Verified:    true,
-			Optimistic:  optimistic,
+			Slot:              cfg.roblock.Block().Slot(),
+			BlockRoot:         cfg.roblock.Root(),
+			SignedBlock:       cfg.roblock,
+			CurrDependentRoot: currDependenRoot,
+			PrevDependentRoot: prevDependentRoot,
+			Verified:          true,
+			Optimistic:        optimistic,
 		},
 	})
 }
```
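One detail in the added block: epochs are unsigned, so the `currEpoch > 0` guard is not just defensive style; subtracting 1 from epoch 0 would wrap around to 2^64-1. A standalone illustration:

```go
package main

import "fmt"

type Epoch uint64

func main() {
	curr := Epoch(0)
	// Without the guard, curr-1 on an unsigned epoch wraps, which is why
	// the diff only queries the previous dependent root when currEpoch > 0.
	prev := curr - 1
	fmt.Println(prev) // 18446744073709551615
}
```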
```diff
@@ -295,6 +309,11 @@ func (s *Service) processLightClientFinalityUpdate(
 		Type: statefeed.LightClientFinalityUpdate,
 		Data: newUpdate,
 	})
+
+	if err = s.cfg.P2p.BroadcastLightClientFinalityUpdate(ctx, newUpdate); err != nil {
+		return errors.Wrap(err, "could not broadcast light client finality update")
+	}
+
 	return nil
 }
```
```diff
@@ -344,6 +363,10 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
 		Data: newUpdate,
 	})
+
+	if err = s.cfg.P2p.BroadcastLightClientOptimisticUpdate(ctx, newUpdate); err != nil {
+		return errors.Wrap(err, "could not broadcast light client optimistic update")
+	}
+
 	return nil
 }
```
```diff
@@ -24,6 +24,7 @@ import (
 	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
 	forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations/kv"
+	mockp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
 	"github.com/OffchainLabs/prysm/v6/config/features"
 	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -3309,6 +3310,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
 	params.OverrideBeaconConfig(beaconCfg)
 
 	s, tr := minimalTestService(t)
+	s.cfg.P2p = &mockp2p.FakeP2P{}
 	ctx := tr.ctx
 
 	testCases := []struct {
@@ -3444,6 +3446,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
 	params.OverrideBeaconConfig(beaconCfg)
 
 	s, tr := minimalTestService(t)
+	s.cfg.P2p = &mockp2p.FakeP2P{}
 	ctx := tr.ctx
 
 	testCases := []struct {
```
```diff
@@ -22,6 +22,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
+	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v6/testing/require"
 	"google.golang.org/protobuf/proto"
@@ -66,6 +67,16 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
 	return nil
 }
 
+func (mb *mockBroadcaster) BroadcastLightClientOptimisticUpdate(_ context.Context, _ interfaces.LightClientOptimisticUpdate) error {
+	mb.broadcastCalled = true
+	return nil
+}
+
+func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfaces.LightClientFinalityUpdate) error {
+	mb.broadcastCalled = true
+	return nil
+}
+
 func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
 }
```
```diff
@@ -719,3 +719,14 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
 func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
 	return c.TargetRoot, nil
 }
+
+// MockSyncChecker is a mock implementation of blockchain.Checker.
+// We can't make an assertion here that this is true because that would create a circular dependency.
+type MockSyncChecker struct {
+	synced bool
+}
+
+// Synced satisfies the blockchain.Checker interface.
+func (m *MockSyncChecker) Synced() bool {
+	return m.synced
+}
```
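The comment about a circular dependency refers to Go's compile-time interface-assertion idiom, which the mock package cannot use here because importing `blockchain` would create an import cycle. In a package free to import both sides, the assertion looks like this (names are ours, for illustration):

```go
package main

// Checker stands in for blockchain.Checker.
type Checker interface {
	Synced() bool
}

type MockSyncChecker struct {
	synced bool
}

func (m *MockSyncChecker) Synced() bool { return m.synced }

// Compile-time proof that *MockSyncChecker satisfies Checker; this line
// is exactly what the mock package has to omit to avoid the cycle.
var _ Checker = (*MockSyncChecker)(nil)

func main() {}
```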
```diff
@@ -67,10 +67,6 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
 	epoch := time.CurrentEpoch(state)
 
 	numValidators := state.NumValidators()
-	hrs, err := state.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
 	s := &ethpb.BeaconStateAltair{
 		GenesisTime:           state.GenesisTime(),
 		GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
@@ -83,7 +79,7 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
 		LatestBlockHeader:     state.LatestBlockHeader(),
 		BlockRoots:            state.BlockRoots(),
 		StateRoots:            state.StateRoots(),
-		HistoricalRoots:       hrs,
+		HistoricalRoots:       state.HistoricalRoots(),
 		Eth1Data:              state.Eth1Data(),
 		Eth1DataVotes:         state.Eth1DataVotes(),
 		Eth1DepositIndex:      state.Eth1DepositIndex(),
```
```diff
@@ -82,10 +82,8 @@ func TestUpgradeToAltair(t *testing.T) {
 	require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), aState.LatestBlockHeader())
 	require.DeepSSZEqual(t, preForkState.BlockRoots(), aState.BlockRoots())
 	require.DeepSSZEqual(t, preForkState.StateRoots(), aState.StateRoots())
-	r1, err := preForkState.HistoricalRoots()
-	require.NoError(t, err)
-	r2, err := aState.HistoricalRoots()
-	require.NoError(t, err)
+	r1 := preForkState.HistoricalRoots()
+	r2 := aState.HistoricalRoots()
 	require.DeepSSZEqual(t, r1, r2)
 	require.DeepSSZEqual(t, preForkState.Eth1Data(), aState.Eth1Data())
 	require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), aState.Eth1DataVotes())
```
```diff
@@ -42,10 +42,6 @@ func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
 		return nil, err
 	}
 
-	hrs, err := state.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
 	s := &ethpb.BeaconStateCapella{
 		GenesisTime:           state.GenesisTime(),
 		GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
@@ -58,7 +54,7 @@ func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
 		LatestBlockHeader:     state.LatestBlockHeader(),
 		BlockRoots:            state.BlockRoots(),
 		StateRoots:            state.StateRoots(),
-		HistoricalRoots:       hrs,
+		HistoricalRoots:       state.HistoricalRoots(),
 		Eth1Data:              state.Eth1Data(),
 		Eth1DataVotes:         state.Eth1DataVotes(),
 		Eth1DepositIndex:      state.Eth1DepositIndex(),
```
```diff
@@ -57,10 +57,6 @@ func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
 	if err != nil {
 		return nil, err
 	}
-	historicalRoots, err := state.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
 
 	s := &ethpb.BeaconStateDeneb{
 		GenesisTime: state.GenesisTime(),
@@ -74,7 +70,7 @@ func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
 		LatestBlockHeader: state.LatestBlockHeader(),
 		BlockRoots:        state.BlockRoots(),
 		StateRoots:        state.StateRoots(),
-		HistoricalRoots:   historicalRoots,
+		HistoricalRoots:   state.HistoricalRoots(),
 		Eth1Data:          state.Eth1Data(),
 		Eth1DataVotes:     state.Eth1DataVotes(),
 		Eth1DepositIndex:  state.Eth1DepositIndex(),
```
```diff
@@ -47,10 +47,8 @@ func TestUpgradeToDeneb(t *testing.T) {
 	require.NoError(t, err)
 	require.DeepSSZEqual(t, make([]uint64, numValidators), s)
 
-	hr1, err := preForkState.HistoricalRoots()
-	require.NoError(t, err)
-	hr2, err := mSt.HistoricalRoots()
-	require.NoError(t, err)
+	hr1 := preForkState.HistoricalRoots()
+	hr2 := mSt.HistoricalRoots()
 	require.DeepEqual(t, hr1, hr2)
 
 	f := mSt.Fork()
```
```diff
@@ -170,10 +170,6 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
 	if err != nil {
 		return nil, err
 	}
-	historicalRoots, err := beaconState.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
 	excessBlobGas, err := payloadHeader.ExcessBlobGas()
 	if err != nil {
 		return nil, err
@@ -223,7 +219,7 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
 		LatestBlockHeader: beaconState.LatestBlockHeader(),
 		BlockRoots:        beaconState.BlockRoots(),
 		StateRoots:        beaconState.StateRoots(),
-		HistoricalRoots:   historicalRoots,
+		HistoricalRoots:   beaconState.HistoricalRoots(),
 		Eth1Data:          beaconState.Eth1Data(),
 		Eth1DataVotes:     beaconState.Eth1DataVotes(),
 		Eth1DepositIndex:  beaconState.Eth1DepositIndex(),
```
```diff
@@ -77,10 +77,8 @@ func TestUpgradeToElectra(t *testing.T) {
 	require.NoError(t, err)
 	require.DeepSSZEqual(t, make([]uint64, numValidators), s)
 
-	hr1, err := preForkState.HistoricalRoots()
-	require.NoError(t, err)
-	hr2, err := mSt.HistoricalRoots()
-	require.NoError(t, err)
+	hr1 := preForkState.HistoricalRoots()
+	hr2 := mSt.HistoricalRoots()
 	require.DeepEqual(t, hr1, hr2)
 
 	f := mSt.Fork()
```
```diff
@@ -148,8 +148,7 @@ func TestProcessFinalUpdates_CanProcess(t *testing.T) {
 	assert.DeepNotEqual(t, params.BeaconConfig().ZeroHash[:], mix, "latest RANDAO still zero hashes")
 
 	// Verify historical root accumulator was appended.
-	roots, err := newS.HistoricalRoots()
-	require.NoError(t, err)
+	roots := newS.HistoricalRoots()
 	assert.Equal(t, 1, len(roots), "Unexpected slashed balance")
 	currAtt, err := newS.CurrentEpochAttestations()
 	require.NoError(t, err)
@@ -379,8 +378,7 @@ func TestProcessHistoricalDataUpdate(t *testing.T) {
 				return st
 			},
 			verifier: func(st state.BeaconState) {
-				roots, err := st.HistoricalRoots()
-				require.NoError(t, err)
+				roots := st.HistoricalRoots()
 				require.Equal(t, 0, len(roots))
 			},
 		},
@@ -393,8 +391,7 @@ func TestProcessHistoricalDataUpdate(t *testing.T) {
 				return st
 			},
 			verifier: func(st state.BeaconState) {
-				roots, err := st.HistoricalRoots()
-				require.NoError(t, err)
+				roots := st.HistoricalRoots()
 				require.Equal(t, 1, len(roots))
 
 				b := &ethpb.HistoricalBatch{
@@ -431,8 +428,7 @@ func TestProcessHistoricalDataUpdate(t *testing.T) {
 					StateSummaryRoot: sr[:],
 				}
 				require.DeepEqual(t, b, summaries[0])
-				hrs, err := st.HistoricalRoots()
-				require.NoError(t, err)
+				hrs := st.HistoricalRoots()
 				require.DeepEqual(t, hrs, [][]byte{})
 			},
 		},
```
```diff
@@ -35,10 +35,6 @@ func UpgradeToBellatrix(state state.BeaconState) (state.BeaconState, error) {
 		return nil, err
 	}
 
-	hrs, err := state.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
 	s := &ethpb.BeaconStateBellatrix{
 		GenesisTime:           state.GenesisTime(),
 		GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
@@ -51,7 +47,7 @@ func UpgradeToBellatrix(state state.BeaconState) (state.BeaconState, error) {
 		LatestBlockHeader:     state.LatestBlockHeader(),
 		BlockRoots:            state.BlockRoots(),
 		StateRoots:            state.StateRoots(),
-		HistoricalRoots:       hrs,
+		HistoricalRoots:       state.HistoricalRoots(),
 		Eth1Data:              state.Eth1Data(),
 		Eth1DataVotes:         state.Eth1DataVotes(),
 		Eth1DepositIndex:      state.Eth1DepositIndex(),
```
```diff
@@ -24,10 +24,8 @@ func TestUpgradeToBellatrix(t *testing.T) {
 	require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
 	require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
 	require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
-	r1, err := preForkState.HistoricalRoots()
-	require.NoError(t, err)
-	r2, err := mSt.HistoricalRoots()
-	require.NoError(t, err)
+	r1 := preForkState.HistoricalRoots()
+	r2 := mSt.HistoricalRoots()
 	require.DeepSSZEqual(t, r1, r2)
 	require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
 	require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
```
```diff
@@ -11,6 +11,8 @@ const (
 
 // ReceivedBlockData is the data sent with ReceivedBlock events.
 type ReceivedBlockData struct {
-	SignedBlock  interfaces.ReadOnlySignedBeaconBlock
-	IsOptimistic bool
+	SignedBlock       interfaces.ReadOnlySignedBeaconBlock
+	CurrDependentRoot [32]byte
+	PrevDependentRoot [32]byte
+	IsOptimistic      bool
 }
```
```diff
@@ -43,6 +43,10 @@ type BlockProcessedData struct {
 	BlockRoot [32]byte
 	// SignedBlock is the physical processed block.
 	SignedBlock interfaces.ReadOnlySignedBeaconBlock
+	// CurrDependentRoot is the current dependent root
+	CurrDependentRoot [32]byte
+	// PrevDependentRoot is the previous dependent root
+	PrevDependentRoot [32]byte
 	// Verified is true if the block's BLS contents have been verified.
 	Verified bool
 	// Optimistic is true if the block is optimistic.
```
```diff
@@ -57,10 +57,6 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
 	if err != nil {
 		return nil, err
 	}
-	historicalRoots, err := beaconState.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
 	excessBlobGas, err := payloadHeader.ExcessBlobGas()
 	if err != nil {
 		return nil, err
@@ -118,7 +114,7 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
 		LatestBlockHeader: beaconState.LatestBlockHeader(),
 		BlockRoots:        beaconState.BlockRoots(),
 		StateRoots:        beaconState.StateRoots(),
-		HistoricalRoots:   historicalRoots,
+		HistoricalRoots:   beaconState.HistoricalRoots(),
 		Eth1Data:          beaconState.Eth1Data(),
 		Eth1DataVotes:     beaconState.Eth1DataVotes(),
 		Eth1DepositIndex:  beaconState.Eth1DepositIndex(),
```
```diff
@@ -43,10 +43,8 @@ func TestUpgradeToFulu(t *testing.T) {
 	require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
 	require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
 
-	hr1, err := preForkState.HistoricalRoots()
-	require.NoError(t, err)
-	hr2, err := mSt.HistoricalRoots()
-	require.NoError(t, err)
+	hr1 := preForkState.HistoricalRoots()
+	hr2 := mSt.HistoricalRoots()
 	require.DeepEqual(t, hr1, hr2)
 
 	require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
```
```diff
@@ -18,6 +18,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/crypto/hash"
 	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
 	"github.com/OffchainLabs/prysm/v6/math"
+	"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
 	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v6/runtime/version"
 	"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -297,6 +298,9 @@ func verifyAssignmentEpoch(epoch primitives.Epoch, state state.BeaconState) erro
 // It verifies the validity of the epoch, then iterates through each slot in the epoch to determine the
 // proposer for that slot and assigns them accordingly.
 func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch primitives.Epoch) (map[primitives.ValidatorIndex][]primitives.Slot, error) {
+	ctx, span := trace.StartSpan(ctx, "helpers.ProposerAssignments")
+	defer span.End()
+
 	// Verify if the epoch is valid for assignment based on the provided state.
 	if err := verifyAssignmentEpoch(epoch, state); err != nil {
 		return nil, err
@@ -345,6 +349,9 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
 // It retrieves active validator indices, determines the number of committees per slot, and computes
 // assignments for each validator based on their presence in the provided validators slice.
 func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch primitives.Epoch, validators []primitives.ValidatorIndex) (map[primitives.ValidatorIndex]*CommitteeAssignment, error) {
+	ctx, span := trace.StartSpan(ctx, "helpers.CommitteeAssignments")
+	defer span.End()
+
 	// Verify if the epoch is valid for assignment based on the provided state.
 	if err := verifyAssignmentEpoch(epoch, state); err != nil {
 		return nil, err
```
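The two additions are the standard span-per-function tracing pattern: open a span on entry, defer its `End`, and continue with the derived context. A runnable sketch with a stand-in span type (Prysm's `trace.StartSpan` wraps its tracing backend; this is not that implementation):

```go
package main

import (
	"context"
	"fmt"
)

type span struct{ name string }

func (s *span) End() { fmt.Println("span ended:", s.name) }

func startSpan(ctx context.Context, name string) (context.Context, *span) {
	fmt.Println("span started:", name)
	return ctx, &span{name: name}
}

func proposerAssignments(ctx context.Context) error {
	ctx, sp := startSpan(ctx, "helpers.ProposerAssignments")
	defer sp.End() // runs on every return path, including errors
	_ = ctx        // the derived context would flow into downstream calls
	return nil
}

func main() { _ = proposerAssignments(context.Background()) }
```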
beacon-chain/core/peerdas/BUILD.bazel (new file, 71 lines)
```
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "das_core.go",
        "info.go",
        "metrics.go",
        "p2p_interface.go",
        "reconstruction.go",
        "util.go",
        "validator.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//container/trie:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
        "@com_github_hashicorp_golang_lru//:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@org_golang_x_sync//errgroup:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "das_core_test.go",
        "info_test.go",
        "p2p_interface_test.go",
        "reconstruction_test.go",
        "utils_test.go",
        "validator_test.go",
    ],
    deps = [
        ":go_default_library",
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)
```
beacon-chain/core/peerdas/das_core.go (new file, 402 lines)
```go
package peerdas

import (
	"encoding/binary"
	"math"
	"slices"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/crypto/hash"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/holiman/uint256"
	"github.com/pkg/errors"
)

var (
	// Custom errors
	ErrCustodyGroupTooLarge           = errors.New("custody group too large")
	ErrCustodyGroupCountTooLarge      = errors.New("custody group count too large")
	ErrMismatchSize                   = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
	errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")

	// maxUint256 is the maximum value of an uint256.
	maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
)

type CustodyType int

const (
	Target CustodyType = iota
	Actual
)

// CustodyGroups computes the custody groups the node should participate in for custody.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#get_custody_groups
func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error) {
	numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups

	// Check if the custody group count is larger than the number of custody groups.
	if custodyGroupCount > numberOfCustodyGroup {
		return nil, ErrCustodyGroupCountTooLarge
	}

	// Shortcut if all custody groups are needed.
	if custodyGroupCount == numberOfCustodyGroup {
		custodyGroups := make([]uint64, 0, numberOfCustodyGroup)
		for i := range numberOfCustodyGroup {
			custodyGroups = append(custodyGroups, i)
		}

		return custodyGroups, nil
	}

	one := uint256.NewInt(1)

	custodyGroupsMap := make(map[uint64]bool, custodyGroupCount)
	custodyGroups := make([]uint64, 0, custodyGroupCount)
	for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(custodyGroups)) < custodyGroupCount; {
		// Convert to big endian bytes.
		currentIdBytesBigEndian := currentId.Bytes32()

		// Convert to little endian.
		currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:])

		// Hash the result.
		hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)

		// Get the custody group ID.
		custodyGroup := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % numberOfCustodyGroup

		// Add the custody group to the map.
		if !custodyGroupsMap[custodyGroup] {
			custodyGroupsMap[custodyGroup] = true
			custodyGroups = append(custodyGroups, custodyGroup)
		}

		if currentId.Cmp(maxUint256) == 0 {
			// Overflow prevention.
			currentId = uint256.NewInt(0)
		} else {
			// Increment the current ID.
			currentId.Add(currentId, one)
		}

		// Sort the custody groups.
		slices.Sort[[]uint64](custodyGroups)
	}

	// Final check.
	if uint64(len(custodyGroups)) != custodyGroupCount {
		return nil, errWrongComputedCustodyGroupCount
	}

	return custodyGroups, nil
}

// ComputeColumnsForCustodyGroup computes the columns for a given custody group.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#compute_columns_for_custody_group
func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
	beaconConfig := params.BeaconConfig()
	numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups

	if custodyGroup >= numberOfCustodyGroup {
		return nil, ErrCustodyGroupTooLarge
	}

	numberOfColumns := beaconConfig.NumberOfColumns

	columnsPerGroup := numberOfColumns / numberOfCustodyGroup

	columns := make([]uint64, 0, columnsPerGroup)
	for i := range columnsPerGroup {
		column := numberOfCustodyGroup*i + custodyGroup
		columns = append(columns, column)
	}

	return columns, nil
}

// DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
// The returned value contains pointers to function parameters.
// (If the caller alters `cellsAndProofs` afterwards, the returned value will be modified as well.)
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#get_data_column_sidecars
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
	if signedBlock == nil || signedBlock.IsNil() || len(cellsAndProofs) == 0 {
		return nil, nil
	}

	block := signedBlock.Block()
	blockBody := block.Body()
	blobKzgCommitments, err := blockBody.BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "blob KZG commitments")
	}

	if len(blobKzgCommitments) != len(cellsAndProofs) {
		return nil, ErrMismatchSize
	}

	signedBlockHeader, err := signedBlock.Header()
	if err != nil {
		return nil, errors.Wrap(err, "signed block header")
	}

	kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
	if err != nil {
		return nil, errors.Wrap(err, "merkle proof ZKG commitments")
	}

	dataColumnSidecars, err := DataColumnsSidecarsFromItems(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
	if err != nil {
		return nil, errors.Wrap(err, "data column sidecars from items")
	}

	return dataColumnSidecars, nil
}

// DataColumnsSidecarsFromItems computes the data column sidecars from the signed block header, the blob KZG commitments,
// the KZG commitment inclusion proofs and cells and cell proofs.
// The returned value contains pointers to function parameters.
// (If the caller alters input parameters afterwards, the returned value will be modified as well.)
func DataColumnsSidecarsFromItems(
	signedBlockHeader *ethpb.SignedBeaconBlockHeader,
	blobKzgCommitments [][]byte,
	kzgCommitmentsInclusionProof [][]byte,
	cellsAndProofs []kzg.CellsAndProofs,
) ([]*ethpb.DataColumnSidecar, error) {
	start := time.Now()
	if len(blobKzgCommitments) != len(cellsAndProofs) {
		return nil, ErrMismatchSize
	}

	numberOfColumns := params.BeaconConfig().NumberOfColumns

	blobsCount := len(cellsAndProofs)
	sidecars := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns)
	for columnIndex := range numberOfColumns {
		column := make([]kzg.Cell, 0, blobsCount)
		kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)

		for rowIndex := range blobsCount {
			cellsForRow := cellsAndProofs[rowIndex].Cells
			proofsForRow := cellsAndProofs[rowIndex].Proofs

			cell := cellsForRow[columnIndex]
			column = append(column, cell)

			kzgProof := proofsForRow[columnIndex]
			kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
		}

		columnBytes := make([][]byte, 0, blobsCount)
		for i := range column {
			columnBytes = append(columnBytes, column[i][:])
		}

		kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
		for _, kzgProof := range kzgProofOfColumn {
			kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, kzgProof[:])
		}

		sidecar := &ethpb.DataColumnSidecar{
			Index:                        columnIndex,
			Column:                       columnBytes,
			KzgCommitments:               blobKzgCommitments,
			KzgProofs:                    kzgProofOfColumnBytes,
			SignedBlockHeader:            signedBlockHeader,
			KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
		}

		sidecars = append(sidecars, sidecar)
	}

	dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
	return sidecars, nil
}

// ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
	beaconConfig := params.BeaconConfig()
	numberOfColumns := beaconConfig.NumberOfColumns
	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups

	if columnIndex >= numberOfColumns {
		return 0, ErrIndexTooLarge
	}

	return columnIndex % numberOfCustodyGroups, nil
}

// Blobs extracts blobs from `dataColumnsSidecar`.
// This can be seen as the reciprocal function of DataColumnSidecars.
// `dataColumnsSidecar` needs to contain the data columns corresponding to the non-extended matrix,
// else an error will be returned.
// (`dataColumnsSidecar` can contain extra columns, but they will be ignored.)
func Blobs(indices map[uint64]bool, dataColumnsSidecar []*ethpb.DataColumnSidecar) ([]*blocks.VerifiedROBlob, error) {
	numberOfColumns := params.BeaconConfig().NumberOfColumns

	// Compute the number of needed columns, including the case where the number of columns is odd.
	neededColumnCount := (numberOfColumns + 1) / 2

	// Check if all needed columns are present.
	sliceIndexFromColumnIndex := make(map[uint64]int, len(dataColumnsSidecar))
	for i := range dataColumnsSidecar {
		dataColumnSideCar := dataColumnsSidecar[i]
		index := dataColumnSideCar.Index

		if index < neededColumnCount {
			sliceIndexFromColumnIndex[index] = i
		}
	}

	actualColumnCount := uint64(len(sliceIndexFromColumnIndex))

	// Get missing columns.
	if actualColumnCount < neededColumnCount {
		var missingColumnsSlice []uint64

		for i := range neededColumnCount {
			if _, ok := sliceIndexFromColumnIndex[i]; !ok {
				missingColumnsSlice = append(missingColumnsSlice, i)
			}
		}

		slices.Sort[[]uint64](missingColumnsSlice)
		return nil, errors.Errorf("some columns are missing: %v", missingColumnsSlice)
	}

	// It is safe to retrieve the first column since we already checked that `dataColumnsSidecar` is not empty.
	firstDataColumnSidecar := dataColumnsSidecar[0]

	blobCount := uint64(len(firstDataColumnSidecar.Column))

	// Check all columns have the same length.
	for i := range dataColumnsSidecar {
		if uint64(len(dataColumnsSidecar[i].Column)) != blobCount {
			return nil, errors.Errorf("mismatch in the length of the data columns, expected %d, got %d", blobCount, len(dataColumnsSidecar[i].Column))
		}
	}

	// Reconstruct verified RO blobs from columns.
	verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)

	// Populate and filter indices.
	indicesSlice := populateAndFilterIndices(indices, blobCount)

	for _, blobIndex := range indicesSlice {
		var blob kzg.Blob

		// Compute the content of the blob.
		for columnIndex := range neededColumnCount {
			sliceIndex, ok := sliceIndexFromColumnIndex[columnIndex]
			if !ok {
				return nil, errors.Errorf("missing column %d, this should never happen", columnIndex)
			}

			dataColumnSideCar := dataColumnsSidecar[sliceIndex]
			cell := dataColumnSideCar.Column[blobIndex]

			for i := range cell {
				blob[columnIndex*kzg.BytesPerCell+uint64(i)] = cell[i]
			}
		}

		// Retrieve the blob KZG commitment.
		blobKZGCommitment := kzg.Commitment(firstDataColumnSidecar.KzgCommitments[blobIndex])

		// Compute the blob KZG proof.
		blobKzgProof, err := kzg.ComputeBlobKZGProof(&blob, blobKZGCommitment)
		if err != nil {
			return nil, errors.Wrap(err, "compute blob KZG proof")
		}

		blobSidecar := &ethpb.BlobSidecar{
			Index:                    blobIndex,
			Blob:                     blob[:],
			KzgCommitment:            blobKZGCommitment[:],
			KzgProof:                 blobKzgProof[:],
			SignedBlockHeader:        firstDataColumnSidecar.SignedBlockHeader,
			CommitmentInclusionProof: firstDataColumnSidecar.KzgCommitmentsInclusionProof,
		}

		roBlob, err := blocks.NewROBlob(blobSidecar)
		if err != nil {
			return nil, errors.Wrap(err, "new RO blob")
		}

		verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
		verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
	}

	return verifiedROBlobs, nil
}

// CustodyGroupSamplingSize returns the number of custody groups the node should sample from.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#custody-sampling
func (custodyInfo *CustodyInfo) CustodyGroupSamplingSize(ct CustodyType) uint64 {
	custodyGroupCount := custodyInfo.TargetGroupCount.Get()

	if ct == Actual {
		custodyGroupCount = custodyInfo.ActualGroupCount()
	}

	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
	return max(samplesPerSlot, custodyGroupCount)
}

// CustodyColumns computes the custody columns from the custody groups.
func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
	numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups

	custodyGroupCount := len(custodyGroups)

	// Compute the columns for each custody group.
	columns := make(map[uint64]bool, custodyGroupCount)
	for _, group := range custodyGroups {
		if group >= numberOfCustodyGroups {
			return nil, ErrCustodyGroupTooLarge
		}

		groupColumns, err := ComputeColumnsForCustodyGroup(group)
		if err != nil {
			return nil, errors.Wrap(err, "compute columns for custody group")
		}

		for _, column := range groupColumns {
			columns[column] = true
		}
	}

	return columns, nil
}

// populateAndFilterIndices returns a sorted slice of indices, setting all indices if none are provided,
// and filtering out indices higher than the blob count.
func populateAndFilterIndices(indices map[uint64]bool, blobCount uint64) []uint64 {
	// If no indices are provided, provide all blobs.
	if len(indices) == 0 {
		for i := range blobCount {
			indices[i] = true
		}
	}

	// Filter out blob indices higher than the blob count.
	indicesSlice := make([]uint64, 0, len(indices))
	for i := range indices {
		if i < blobCount {
			indicesSlice = append(indicesSlice, i)
		}
	}

	// Sort the indices.
	slices.Sort[[]uint64](indicesSlice)

	return indicesSlice
}
```
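The group-to-column mapping in `das_core.go` is plain modular arithmetic, which is why `ComputeCustodyGroupForColumn` can be its exact inverse. A dependency-free sketch using the 128-column / 64-group configuration the tests below set up (real code reads both values from `params.BeaconConfig()`):

```go
package main

import "fmt"

const (
	numberOfColumns       uint64 = 128
	numberOfCustodyGroups uint64 = 64
)

// columnsForCustodyGroup reimplements the arithmetic of
// ComputeColumnsForCustodyGroup: group g custodies columns
// g, g+G, g+2G, ... with G = numberOfCustodyGroups.
func columnsForCustodyGroup(group uint64) []uint64 {
	perGroup := numberOfColumns / numberOfCustodyGroups
	cols := make([]uint64, 0, perGroup)
	for i := uint64(0); i < perGroup; i++ {
		cols = append(cols, numberOfCustodyGroups*i+group)
	}
	return cols
}

// custodyGroupForColumn is the reciprocal mapping: column c -> c mod G.
func custodyGroupForColumn(column uint64) uint64 {
	return column % numberOfCustodyGroups
}

func main() {
	for _, c := range columnsForCustodyGroup(5) {
		fmt.Println(c, "->", custodyGroupForColumn(c)) // 5 -> 5, 69 -> 5
	}
}
```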
beacon-chain/core/peerdas/das_core_test.go (new file, 311 lines; capture truncated below)
@@ -0,0 +1,311 @@
|
package peerdas_test

import (
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/pkg/errors"
)

func TestCustodyGroups(t *testing.T) {
    // The happy path is unit tested in spec tests.
    numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
    _, err := peerdas.CustodyGroups(enode.ID{}, numberOfCustodyGroup+1)
    require.ErrorIs(t, err, peerdas.ErrCustodyGroupCountTooLarge)
}

func TestComputeColumnsForCustodyGroup(t *testing.T) {
    // The happy path is unit tested in spec tests.
    numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
    _, err := peerdas.ComputeColumnsForCustodyGroup(numberOfCustodyGroup)
    require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge)
}

func TestDataColumnSidecars(t *testing.T) {
    t.Run("nil signed block", func(t *testing.T) {
        var expected []*ethpb.DataColumnSidecar = nil
        actual, err := peerdas.DataColumnSidecars(nil, []kzg.CellsAndProofs{})
        require.NoError(t, err)

        require.DeepSSZEqual(t, expected, actual)
    })

    t.Run("empty cells and proofs", func(t *testing.T) {
        // Create a protobuf signed beacon block.
        signedBeaconBlockPb := util.NewBeaconBlockDeneb()

        // Create a signed beacon block from the protobuf.
        signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
        require.NoError(t, err)

        actual, err := peerdas.DataColumnSidecars(signedBeaconBlock, []kzg.CellsAndProofs{})
        require.NoError(t, err)
        require.IsNil(t, actual)
    })

    t.Run("sizes mismatch", func(t *testing.T) {
        // Create a protobuf signed beacon block.
        signedBeaconBlockPb := util.NewBeaconBlockDeneb()

        // Create a signed beacon block from the protobuf.
        signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
        require.NoError(t, err)

        // Create cells and proofs.
        cellsAndProofs := make([]kzg.CellsAndProofs, 1)

        _, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
        require.ErrorIs(t, err, peerdas.ErrMismatchSize)
    })
}

// --------------------------------------------------------------------------------------------------------------------------------------
// DataColumnsSidecarsFromItems is tested as part of the DataColumnSidecars tests, in the TestDataColumnsSidecarsBlobsRoundtrip function.
// --------------------------------------------------------------------------------------------------------------------------------------

func TestComputeCustodyGroupForColumn(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    config := params.BeaconConfig()
    config.NumberOfColumns = 128
    config.NumberOfCustodyGroups = 64
    params.OverrideBeaconConfig(config)

    t.Run("index too large", func(t *testing.T) {
        _, err := peerdas.ComputeCustodyGroupForColumn(1_000_000)
        require.ErrorIs(t, err, peerdas.ErrIndexTooLarge)
    })

    t.Run("nominal", func(t *testing.T) {
        expected := uint64(2)
        actual, err := peerdas.ComputeCustodyGroupForColumn(2)
        require.NoError(t, err)
        require.Equal(t, expected, actual)

        expected = uint64(3)
        actual, err = peerdas.ComputeCustodyGroupForColumn(3)
        require.NoError(t, err)
        require.Equal(t, expected, actual)

        expected = uint64(2)
        actual, err = peerdas.ComputeCustodyGroupForColumn(66)
        require.NoError(t, err)
        require.Equal(t, expected, actual)

        expected = uint64(3)
        actual, err = peerdas.ComputeCustodyGroupForColumn(67)
        require.NoError(t, err)
        require.Equal(t, expected, actual)
    })
}

func TestBlobs(t *testing.T) {
    blobIndices := map[uint64]bool{}

    numberOfColumns := params.BeaconConfig().NumberOfColumns

    almostAllColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
    for i := uint64(2); i < numberOfColumns/2+2; i++ {
        almostAllColumns = append(almostAllColumns, &ethpb.DataColumnSidecar{
            Index: i,
        })
    }

    testCases := []struct {
        name     string
        input    []*ethpb.DataColumnSidecar
        expected []*blocks.VerifiedROBlob
        err      error
    }{
        {
            name:     "empty input",
            input:    []*ethpb.DataColumnSidecar{},
            expected: nil,
            err:      errors.New("some columns are missing: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63]"),
        },
        {
            name:     "missing columns",
            input:    almostAllColumns,
            expected: nil,
            err:      errors.New("some columns are missing: [0 1]"),
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            actual, err := peerdas.Blobs(blobIndices, tc.input)
            if tc.err != nil {
                require.Equal(t, tc.err.Error(), err.Error())
            } else {
                require.NoError(t, err)
            }
            require.DeepSSZEqual(t, tc.expected, actual)
        })
    }
}

func TestDataColumnsSidecarsBlobsRoundtrip(t *testing.T) {
    const blobCount = 5
    blobsIndex := map[uint64]bool{}

    // Start the trusted setup.
    err := kzg.Start()
    require.NoError(t, err)

    // Create a protobuf signed beacon block.
    signedBeaconBlockPb := util.NewBeaconBlockDeneb()

    // Generate random blobs and their corresponding commitments and proofs.
    blobs := make([]kzg.Blob, 0, blobCount)
    blobKzgCommitments := make([]*kzg.Commitment, 0, blobCount)
    blobKzgProofs := make([]*kzg.Proof, 0, blobCount)

    for blobIndex := range blobCount {
        // Create a random blob.
        blob := getRandBlob(int64(blobIndex))
        blobs = append(blobs, blob)

        // Generate a KZG commitment and proof for the blob.
        blobKZGCommitment, proof, err := generateCommitmentAndProof(&blob)
        require.NoError(t, err)

        blobKzgCommitments = append(blobKzgCommitments, blobKZGCommitment)
        blobKzgProofs = append(blobKzgProofs, proof)
    }

    // Set the commitments into the block.
    blobKzgCommitmentsBytes := make([][]byte, 0, blobCount)
    for _, blobKZGCommitment := range blobKzgCommitments {
        blobKzgCommitmentsBytes = append(blobKzgCommitmentsBytes, blobKZGCommitment[:])
    }

    signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobKzgCommitmentsBytes

    // Generate verified RO blobs.
    verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)

    // Create a signed beacon block from the protobuf.
    signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
    require.NoError(t, err)

    commitmentInclusionProof, err := blocks.MerkleProofKZGCommitments(signedBeaconBlock.Block().Body())
    require.NoError(t, err)

    for blobIndex := range blobCount {
        blob := blobs[blobIndex]
        blobKZGCommitment := blobKzgCommitments[blobIndex]
        blobKzgProof := blobKzgProofs[blobIndex]

        // Get the signed beacon block header.
        signedBeaconBlockHeader, err := signedBeaconBlock.Header()
        require.NoError(t, err)

        blobSidecar := &ethpb.BlobSidecar{
            Index:                    uint64(blobIndex),
            Blob:                     blob[:],
            KzgCommitment:            blobKZGCommitment[:],
            KzgProof:                 blobKzgProof[:],
            SignedBlockHeader:        signedBeaconBlockHeader,
            CommitmentInclusionProof: commitmentInclusionProof,
        }

        roBlob, err := blocks.NewROBlob(blobSidecar)
        require.NoError(t, err)

        verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
        verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
    }

    // Compute data columns sidecars from the signed beacon block and from the blobs.
    cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
    dataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
    require.NoError(t, err)

    // Compute the blobs from the data columns sidecar.
    roundtripBlobs, err := peerdas.Blobs(blobsIndex, dataColumnsSidecar)
    require.NoError(t, err)

    // Check that the blobs are the same.
    require.DeepSSZEqual(t, verifiedROBlobs, roundtripBlobs)
}

func TestCustodyGroupSamplingSize(t *testing.T) {
    testCases := []struct {
        name                         string
        custodyType                  peerdas.CustodyType
        validatorsCustodyRequirement uint64
        toAdvertiseCustodyGroupCount uint64
        expected                     uint64
    }{
        {
            name:                         "target, lower than samples per slot",
            custodyType:                  peerdas.Target,
            validatorsCustodyRequirement: 2,
            expected:                     8,
        },
        {
            name:                         "target, higher than samples per slot",
            custodyType:                  peerdas.Target,
            validatorsCustodyRequirement: 100,
            expected:                     100,
        },
        {
            name:                         "actual, lower than samples per slot",
            custodyType:                  peerdas.Actual,
            validatorsCustodyRequirement: 3,
            toAdvertiseCustodyGroupCount: 4,
            expected:                     8,
        },
        {
            name:                         "actual, higher than samples per slot",
            custodyType:                  peerdas.Actual,
            validatorsCustodyRequirement: 100,
            toAdvertiseCustodyGroupCount: 101,
            expected:                     100,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            // Create a custody info.
            custodyInfo := peerdas.CustodyInfo{}

            // Set the validators custody requirement for the target custody group count.
            custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)

            // Set the to-advertise custody group count.
            custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)

            // Compute the custody group sampling size.
            actual := custodyInfo.CustodyGroupSamplingSize(tc.custodyType)

            // Check the result.
            require.Equal(t, tc.expected, actual)
        })
    }
}

func TestCustodyColumns(t *testing.T) {
    t.Run("group too large", func(t *testing.T) {
        _, err := peerdas.CustodyColumns([]uint64{1_000_000})
        require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge)
    })

    t.Run("nominal", func(t *testing.T) {
        input := []uint64{1, 2}
        expected := map[uint64]bool{1: true, 2: true}

        actual, err := peerdas.CustodyColumns(input)
        require.NoError(t, err)
        require.Equal(t, len(expected), len(actual))
        for i := range actual {
            require.Equal(t, expected[i], actual[i])
        }
    })
}
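The roundtrip test above pins down the central PeerDAS invariant: blobs extended into data column sidecars can be recovered as the same verified blobs. A hedged sketch of that flow, assuming `signedBlock` and its matching `cellsAndProofs` are already built (the test builds them via util.GenerateCellsAndProofs):

    sidecars, err := peerdas.DataColumnSidecars(signedBlock, cellsAndProofs)
    if err != nil {
        return err
    }
    // An empty indices map means "return every blob" (see populateAndFilterIndices).
    blobs, err := peerdas.Blobs(map[uint64]bool{}, sidecars)
    if err != nil {
        return err
    }
    _ = blobs // verified RO blobs, SSZ-equal to the originals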
192
beacon-chain/core/peerdas/info.go
Normal file
@@ -0,0 +1,192 @@
package peerdas

import (
    "encoding/binary"
    "sync"

    "github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/ethereum/go-ethereum/p2p/enode"
    lru "github.com/hashicorp/golang-lru"
    "github.com/pkg/errors"
)

// info contains all useful peerDAS-related information regarding a peer.
type (
    info struct {
        CustodyGroups      map[uint64]bool
        CustodyColumns     map[uint64]bool
        DataColumnsSubnets map[uint64]bool
    }

    targetCustodyGroupCount struct {
        mut                          sync.RWMutex
        validatorsCustodyRequirement uint64
    }

    toAdvertiseCustodyGroupCount struct {
        mut   sync.RWMutex
        value uint64
    }

    CustodyInfo struct {
        // Mut is a mutex callers can hold to ensure that neither
        // TargetGroupCount nor ToAdvertiseGroupCount is being modified.
        // (Holding it is not required for data protection; each counter has its own lock.)
        Mut sync.RWMutex

        // TargetGroupCount represents the target number of custody groups we should custody
        // regarding the validators we are tracking.
        TargetGroupCount targetCustodyGroupCount

        // ToAdvertiseGroupCount represents the number of custody groups to advertise to the network.
        ToAdvertiseGroupCount toAdvertiseCustodyGroupCount
    }
)

const (
    nodeInfoCacheSize    = 200
    nodeInfoCacheKeySize = 32 + 8
)

var (
    nodeInfoCacheMut sync.Mutex
    nodeInfoCache    *lru.Cache
)

// Info returns the peerDAS information for a given nodeID and custodyGroupCount.
// It returns a boolean indicating if the peer info was already in the cache and an error if any.
func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) {
    // Create a new cache if it doesn't exist.
    if err := createInfoCacheIfNeeded(); err != nil {
        return nil, false, errors.Wrap(err, "create cache if needed")
    }

    // Compute the key.
    key := computeInfoCacheKey(nodeID, custodyGroupCount)

    // If the value is already in the cache, return it.
    if value, ok := nodeInfoCache.Get(key); ok {
        peerInfo, ok := value.(*info)
        if !ok {
            return nil, false, errors.New("failed to cast peer info (should never happen)")
        }

        return peerInfo, true, nil
    }

    // The peer info is not in the cache, compute it.
    // Compute custody groups.
    custodyGroups, err := CustodyGroups(nodeID, custodyGroupCount)
    if err != nil {
        return nil, false, errors.Wrap(err, "custody groups")
    }

    // Compute custody columns.
    custodyColumns, err := CustodyColumns(custodyGroups)
    if err != nil {
        return nil, false, errors.Wrap(err, "custody columns")
    }

    // Compute data columns subnets.
    dataColumnsSubnets := DataColumnSubnets(custodyColumns)

    // Convert the custody groups to a map.
    custodyGroupsMap := make(map[uint64]bool, len(custodyGroups))
    for _, group := range custodyGroups {
        custodyGroupsMap[group] = true
    }

    result := &info{
        CustodyGroups:      custodyGroupsMap,
        CustodyColumns:     custodyColumns,
        DataColumnsSubnets: dataColumnsSubnets,
    }

    // Add the result to the cache.
    nodeInfoCache.Add(key, result)

    return result, false, nil
}

// ActualGroupCount returns the actual custody group count.
func (custodyInfo *CustodyInfo) ActualGroupCount() uint64 {
    return min(custodyInfo.TargetGroupCount.Get(), custodyInfo.ToAdvertiseGroupCount.Get())
}

// Get returns the number of groups we should participate in for custody.
func (tcgc *targetCustodyGroupCount) Get() uint64 {
    // If subscribed to all subnets, return the number of custody groups.
    if flags.Get().SubscribeToAllSubnets {
        return params.BeaconConfig().NumberOfCustodyGroups
    }

    tcgc.mut.RLock()
    defer tcgc.mut.RUnlock()

    // If no validators are tracked, return the default custody requirement.
    if tcgc.validatorsCustodyRequirement == 0 {
        return params.BeaconConfig().CustodyRequirement
    }

    // Return the validators custody requirement.
    return tcgc.validatorsCustodyRequirement
}

// SetValidatorsCustodyRequirement sets the validators custody requirement.
func (tcgc *targetCustodyGroupCount) SetValidatorsCustodyRequirement(value uint64) {
    tcgc.mut.Lock()
    defer tcgc.mut.Unlock()

    tcgc.validatorsCustodyRequirement = value
}

// Get returns the to-advertise custody group count.
func (tacgc *toAdvertiseCustodyGroupCount) Get() uint64 {
    // If subscribed to all subnets, return the number of custody groups.
    if flags.Get().SubscribeToAllSubnets {
        return params.BeaconConfig().NumberOfCustodyGroups
    }

    custodyRequirement := params.BeaconConfig().CustodyRequirement

    tacgc.mut.RLock()
    defer tacgc.mut.RUnlock()

    return max(tacgc.value, custodyRequirement)
}

// Set sets the to-advertise custody group count.
func (tacgc *toAdvertiseCustodyGroupCount) Set(value uint64) {
    tacgc.mut.Lock()
    defer tacgc.mut.Unlock()

    tacgc.value = value
}

// createInfoCacheIfNeeded creates a new cache if it doesn't exist.
func createInfoCacheIfNeeded() error {
    nodeInfoCacheMut.Lock()
    defer nodeInfoCacheMut.Unlock()

    if nodeInfoCache == nil {
        c, err := lru.New(nodeInfoCacheSize)
        if err != nil {
            return errors.Wrap(err, "lru new")
        }

        nodeInfoCache = c
    }

    return nil
}

// computeInfoCacheKey returns a unique key for a node and its custodyGroupCount.
func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCacheKeySize]byte {
    var key [nodeInfoCacheKeySize]byte

    copy(key[:32], nodeID[:])
    binary.BigEndian.PutUint64(key[32:], custodyGroupCount)

    return key
}
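A short usage sketch of the cached lookup above (the zero node ID and group count of 7 mirror TestInfo below; the second call is served from the LRU):

    peerInfo, cached, err := peerdas.Info(enode.ID{}, 7) // cached == false: computed and stored
    if err != nil {
        return err
    }
    sameInfo, cached, err := peerdas.Info(enode.ID{}, 7) // cached == true: same entry from the cache
    _ = peerInfo
    _ = sameInfo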
133
beacon-chain/core/peerdas/info_test.go
Normal file
@@ -0,0 +1,133 @@
package peerdas_test

import (
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/ethereum/go-ethereum/p2p/enode"
)

func TestInfo(t *testing.T) {
    nodeID := enode.ID{}
    custodyGroupCount := uint64(7)

    expectedCustodyGroup := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}
    expectedCustodyColumns := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}
    expectedDataColumnsSubnets := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}

    for _, cached := range []bool{false, true} {
        actual, ok, err := peerdas.Info(nodeID, custodyGroupCount)
        require.NoError(t, err)
        require.Equal(t, cached, ok)
        require.DeepEqual(t, expectedCustodyGroup, actual.CustodyGroups)
        require.DeepEqual(t, expectedCustodyColumns, actual.CustodyColumns)
        require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
    }
}

func TestTargetCustodyGroupCount(t *testing.T) {
    testCases := []struct {
        name                         string
        subscribeToAllSubnets        bool
        validatorsCustodyRequirement uint64
        expected                     uint64
    }{
        {
            name:                         "subscribed to all subnets",
            subscribeToAllSubnets:        true,
            validatorsCustodyRequirement: 100,
            expected:                     128,
        },
        {
            name:                         "no validators attached",
            subscribeToAllSubnets:        false,
            validatorsCustodyRequirement: 0,
            expected:                     4,
        },
        {
            name:                         "some validators attached",
            subscribeToAllSubnets:        false,
            validatorsCustodyRequirement: 100,
            expected:                     100,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            // Subscribe to all subnets if needed.
            if tc.subscribeToAllSubnets {
                resetFlags := flags.Get()
                gFlags := new(flags.GlobalFlags)
                gFlags.SubscribeToAllSubnets = true
                flags.Init(gFlags)
                defer flags.Init(resetFlags)
            }

            var custodyInfo peerdas.CustodyInfo

            // Set the validators custody requirement.
            custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)

            // Get the target custody group count.
            actual := custodyInfo.TargetGroupCount.Get()

            // Compare the expected and actual values.
            require.Equal(t, tc.expected, actual)
        })
    }
}

func TestToAdvertiseCustodyGroupCount(t *testing.T) {
    testCases := []struct {
        name                         string
        subscribeToAllSubnets        bool
        toAdvertiseCustodyGroupCount uint64
        expected                     uint64
    }{
        {
            name:                         "subscribed to all subnets",
            subscribeToAllSubnets:        true,
            toAdvertiseCustodyGroupCount: 100,
            expected:                     128,
        },
        {
            name:                         "higher than custody requirement",
            subscribeToAllSubnets:        false,
            toAdvertiseCustodyGroupCount: 100,
            expected:                     100,
        },
        {
            name:                         "lower than custody requirement",
            subscribeToAllSubnets:        false,
            toAdvertiseCustodyGroupCount: 1,
            expected:                     4,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            // Subscribe to all subnets if needed.
            if tc.subscribeToAllSubnets {
                resetFlags := flags.Get()
                gFlags := new(flags.GlobalFlags)
                gFlags.SubscribeToAllSubnets = true
                flags.Init(gFlags)
                defer flags.Init(resetFlags)
            }

            // Create a custody info.
            var custodyInfo peerdas.CustodyInfo

            // Set the to-advertise custody group count.
            custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)

            // Get the to-advertise custody group count.
            actual := custodyInfo.ToAdvertiseGroupCount.Get()

            // Compare the expected and actual values.
            require.Equal(t, tc.expected, actual)
        })
    }
}
14
beacon-chain/core/peerdas/metrics.go
Normal file
@@ -0,0 +1,14 @@
package peerdas

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

var dataColumnComputationTime = promauto.NewHistogram(
    prometheus.HistogramOpts{
        Name:    "beacon_data_column_sidecar_computation_milliseconds",
        Help:    "Captures the time taken to compute data column sidecars from blobs.",
        Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
    },
)
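The histogram is in milliseconds with buckets up to 16s. A sketch of the kind of call site that would feed it; the actual instrumentation point is not part of this hunk, so this placement is an assumption:

    start := time.Now()
    sidecars, err := DataColumnSidecars(block, cellsAndProofs)
    if err != nil {
        return nil, err
    }
    dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
    return sidecars, nil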
162
beacon-chain/core/peerdas/p2p_interface.go
Normal file
@@ -0,0 +1,162 @@
package peerdas

import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/container/trie"
    "github.com/ethereum/go-ethereum/p2p/enr"
    "github.com/pkg/errors"
)

const (
    CustodyGroupCountEnrKey = "cgc"
    kzgPosition             = 11 // The index of the KZG commitment list in the Body.
)

var (
    ErrIndexTooLarge               = errors.New("column index is larger than the specified columns count")
    ErrNoKzgCommitments            = errors.New("no KZG commitments found")
    ErrMismatchLength              = errors.New("mismatch in the length of the column, commitments or proofs")
    ErrInvalidKZGProof             = errors.New("invalid KZG proof")
    ErrBadRootLength               = errors.New("bad root length")
    ErrInvalidInclusionProof       = errors.New("invalid inclusion proof")
    ErrRecordNil                   = errors.New("record is nil")
    ErrNilBlockHeader              = errors.New("nil beacon block header")
    ErrCannotLoadCustodyGroupCount = errors.New("cannot load the custody group count from peer")
)

// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#custody-group-count
type Cgc uint64

func (Cgc) ENRKey() string { return CustodyGroupCountEnrKey }

// VerifyDataColumnSidecar verifies if the data column sidecar is valid.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar
func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
    // The sidecar index must be within the valid range.
    numberOfColumns := params.BeaconConfig().NumberOfColumns
    if sidecar.Index >= numberOfColumns {
        return ErrIndexTooLarge
    }

    // A sidecar for zero blobs is invalid.
    if len(sidecar.KzgCommitments) == 0 {
        return ErrNoKzgCommitments
    }

    // The column length must be equal to the number of commitments/proofs.
    if len(sidecar.Column) != len(sidecar.KzgCommitments) || len(sidecar.Column) != len(sidecar.KzgProofs) {
        return ErrMismatchLength
    }

    return nil
}

// VerifyDataColumnsSidecarKZGProofs verifies if the KZG proofs are correct.
// Note: We are slightly deviating from the specification here:
// The specification verifies the KZG proofs for each sidecar separately,
// while we are verifying all the KZG proofs from multiple sidecars in a batch.
// This is done to improve performance since the internal KZG library is way more
// efficient when verifying in batch.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
    // Compute the total count.
    count := 0
    for _, sidecar := range sidecars {
        count += len(sidecar.Column)
    }

    commitments := make([]kzg.Bytes48, 0, count)
    indices := make([]uint64, 0, count)
    cells := make([]kzg.Cell, 0, count)
    proofs := make([]kzg.Bytes48, 0, count)

    for _, sidecar := range sidecars {
        for i := range sidecar.Column {
            commitments = append(commitments, kzg.Bytes48(sidecar.KzgCommitments[i]))
            indices = append(indices, sidecar.Index)
            cells = append(cells, kzg.Cell(sidecar.Column[i]))
            proofs = append(proofs, kzg.Bytes48(sidecar.KzgProofs[i]))
        }
    }

    // Batch verify that the cells match the corresponding commitments and proofs.
    verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
    if err != nil {
        return errors.Wrap(err, "verify cell KZG proof batch")
    }

    if !verified {
        return ErrInvalidKZGProof
    }

    return nil
}

// VerifyDataColumnSidecarInclusionProof verifies whether the given KZG commitments are included in the given beacon block.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
    if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
        return ErrNilBlockHeader
    }

    root := sidecar.SignedBlockHeader.Header.BodyRoot
    if len(root) != fieldparams.RootLength {
        return ErrBadRootLength
    }

    leaves := blocks.LeavesFromCommitments(sidecar.KzgCommitments)

    sparse, err := trie.GenerateTrieFromItems(leaves, fieldparams.LogMaxBlobCommitments)
    if err != nil {
        return errors.Wrap(err, "generate trie from items")
    }

    hashTreeRoot, err := sparse.HashTreeRoot()
    if err != nil {
        return errors.Wrap(err, "hash tree root")
    }

    verified := trie.VerifyMerkleProof(root, hashTreeRoot[:], kzgPosition, sidecar.KzgCommitmentsInclusionProof)
    if !verified {
        return ErrInvalidInclusionProof
    }

    return nil
}

// ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
    dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
    return columnIndex % dataColumnSidecarSubnetCount
}

// DataColumnSubnets computes the subnets for the data columns.
func DataColumnSubnets(dataColumns map[uint64]bool) map[uint64]bool {
    subnets := make(map[uint64]bool, len(dataColumns))

    for column := range dataColumns {
        subnet := ComputeSubnetForDataColumnSidecar(column)
        subnets[subnet] = true
    }

    return subnets
}

// CustodyGroupCountFromRecord extracts the custody group count from an ENR record.
func CustodyGroupCountFromRecord(record *enr.Record) (uint64, error) {
    if record == nil {
        return 0, ErrRecordNil
    }

    // Load the `cgc` entry.
    var cgc Cgc
    if err := record.Load(&cgc); err != nil {
        return 0, ErrCannotLoadCustodyGroupCount
    }

    return uint64(cgc), nil
}
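The "cgc" ENR entry round-trips exactly as TestCustodyGroupCountFromRecord exercises below; a minimal sketch:

    record := &enr.Record{}
    record.Set(peerdas.Cgc(8)) // advertise custody of 8 groups under the "cgc" key

    cgc, err := peerdas.CustodyGroupCountFromRecord(record) // cgc == 8
    if err != nil {
        return err
    }
    _ = cgc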
304
beacon-chain/core/peerdas/p2p_interface_test.go
Normal file
@@ -0,0 +1,304 @@
package peerdas_test

import (
    "crypto/rand"
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
    "github.com/ethereum/go-ethereum/p2p/enr"
)

func TestVerifyDataColumnSidecar(t *testing.T) {
    t.Run("index too large", func(t *testing.T) {
        roSidecar := createTestSidecar(t, 1_000_000, nil, nil, nil)
        err := peerdas.VerifyDataColumnSidecar(roSidecar)
        require.ErrorIs(t, err, peerdas.ErrIndexTooLarge)
    })

    t.Run("no commitments", func(t *testing.T) {
        roSidecar := createTestSidecar(t, 0, nil, nil, nil)
        err := peerdas.VerifyDataColumnSidecar(roSidecar)
        require.ErrorIs(t, err, peerdas.ErrNoKzgCommitments)
    })

    t.Run("KZG commitments size mismatch", func(t *testing.T) {
        kzgCommitments := make([][]byte, 1)
        roSidecar := createTestSidecar(t, 0, nil, kzgCommitments, nil)
        err := peerdas.VerifyDataColumnSidecar(roSidecar)
        require.ErrorIs(t, err, peerdas.ErrMismatchLength)
    })

    t.Run("KZG proofs size mismatch", func(t *testing.T) {
        column, kzgCommitments := make([][]byte, 1), make([][]byte, 1)
        roSidecar := createTestSidecar(t, 0, column, kzgCommitments, nil)
        err := peerdas.VerifyDataColumnSidecar(roSidecar)
        require.ErrorIs(t, err, peerdas.ErrMismatchLength)
    })

    t.Run("nominal", func(t *testing.T) {
        column, kzgCommitments, kzgProofs := make([][]byte, 1), make([][]byte, 1), make([][]byte, 1)
        roSidecar := createTestSidecar(t, 0, column, kzgCommitments, kzgProofs)
        err := peerdas.VerifyDataColumnSidecar(roSidecar)
        require.NoError(t, err)
    })
}

func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
    err := kzg.Start()
    require.NoError(t, err)

    generateSidecars := func(t *testing.T) []*ethpb.DataColumnSidecar {
        const blobCount = int64(6)

        dbBlock := util.NewBeaconBlockDeneb()

        commitments := make([][]byte, 0, blobCount)
        blobs := make([]kzg.Blob, 0, blobCount)

        for i := range blobCount {
            blob := getRandBlob(i)
            commitment, _, err := generateCommitmentAndProof(&blob)
            require.NoError(t, err)

            commitments = append(commitments, commitment[:])
            blobs = append(blobs, blob)
        }

        dbBlock.Block.Body.BlobKzgCommitments = commitments
        sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
        require.NoError(t, err)

        cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
        sidecars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
        require.NoError(t, err)

        return sidecars
    }

    generateRODataColumnSidecars := func(t *testing.T, sidecars []*ethpb.DataColumnSidecar) []blocks.RODataColumn {
        roDataColumnSidecars := make([]blocks.RODataColumn, 0, len(sidecars))
        for _, sidecar := range sidecars {
            roCol, err := blocks.NewRODataColumn(sidecar)
            require.NoError(t, err)

            roDataColumnSidecars = append(roDataColumnSidecars, roCol)
        }

        return roDataColumnSidecars
    }

    t.Run("invalid proof", func(t *testing.T) {
        sidecars := generateSidecars(t)
        sidecars[0].Column[0][0]++ // It is OK to overflow.
        roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)

        err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
        require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
    })

    t.Run("nominal", func(t *testing.T) {
        sidecars := generateSidecars(t)
        roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)

        err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
        require.NoError(t, err)
    })
}

func Test_VerifyKZGInclusionProofColumn(t *testing.T) {
    const (
        blobCount   = 3
        columnIndex = 0
    )

    // Generate random KZG commitments for `blobCount` blobs.
    kzgCommitments := make([][]byte, blobCount)

    for i := 0; i < blobCount; i++ {
        kzgCommitments[i] = make([]byte, 48)
        _, err := rand.Read(kzgCommitments[i])
        require.NoError(t, err)
    }

    pbBody := &ethpb.BeaconBlockBodyDeneb{
        RandaoReveal: make([]byte, 96),
        Eth1Data: &ethpb.Eth1Data{
            DepositRoot: make([]byte, fieldparams.RootLength),
            BlockHash:   make([]byte, fieldparams.RootLength),
        },
        Graffiti: make([]byte, 32),
        SyncAggregate: &ethpb.SyncAggregate{
            SyncCommitteeBits:      make([]byte, fieldparams.SyncAggregateSyncCommitteeBytesLength),
            SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
        },
        ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
            ParentHash:    make([]byte, fieldparams.RootLength),
            FeeRecipient:  make([]byte, 20),
            StateRoot:     make([]byte, fieldparams.RootLength),
            ReceiptsRoot:  make([]byte, fieldparams.RootLength),
            LogsBloom:     make([]byte, 256),
            PrevRandao:    make([]byte, fieldparams.RootLength),
            BaseFeePerGas: make([]byte, fieldparams.RootLength),
            BlockHash:     make([]byte, fieldparams.RootLength),
            Transactions:  make([][]byte, 0),
            ExtraData:     make([]byte, 0),
        },
        BlobKzgCommitments: kzgCommitments,
    }

    root, err := pbBody.HashTreeRoot()
    require.NoError(t, err)

    body, err := blocks.NewBeaconBlockBody(pbBody)
    require.NoError(t, err)

    kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(body)
    require.NoError(t, err)

    testCases := []struct {
        name              string
        expectedError     error
        dataColumnSidecar *ethpb.DataColumnSidecar
    }{
        {
            name:              "nilSignedBlockHeader",
            expectedError:     peerdas.ErrNilBlockHeader,
            dataColumnSidecar: &ethpb.DataColumnSidecar{},
        },
        {
            name:          "nilHeader",
            expectedError: peerdas.ErrNilBlockHeader,
            dataColumnSidecar: &ethpb.DataColumnSidecar{
                SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{},
            },
        },
        {
            name:          "invalidBodyRoot",
            expectedError: peerdas.ErrBadRootLength,
            dataColumnSidecar: &ethpb.DataColumnSidecar{
                SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                    Header: &ethpb.BeaconBlockHeader{},
                },
            },
        },
        {
            name:          "unverifiedMerkleProof",
            expectedError: peerdas.ErrInvalidInclusionProof,
            dataColumnSidecar: &ethpb.DataColumnSidecar{
                SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                    Header: &ethpb.BeaconBlockHeader{
                        BodyRoot: make([]byte, 32),
                    },
                },
                KzgCommitments: kzgCommitments,
            },
        },
        {
            name:          "nominal",
            expectedError: nil,
            dataColumnSidecar: &ethpb.DataColumnSidecar{
                KzgCommitments: kzgCommitments,
                SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                    Header: &ethpb.BeaconBlockHeader{
                        BodyRoot: root[:],
                    },
                },
                KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
            },
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            roDataColumn := blocks.RODataColumn{DataColumnSidecar: tc.dataColumnSidecar}
            err = peerdas.VerifyDataColumnSidecarInclusionProof(roDataColumn)
            if tc.expectedError == nil {
                require.NoError(t, err)
                return
            }

            require.ErrorIs(t, err, tc.expectedError)
        })
    }
}

func TestComputeSubnetForDataColumnSidecar(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    config := params.BeaconConfig()
    config.DataColumnSidecarSubnetCount = 128
    params.OverrideBeaconConfig(config)

    require.Equal(t, uint64(0), peerdas.ComputeSubnetForDataColumnSidecar(0))
    require.Equal(t, uint64(1), peerdas.ComputeSubnetForDataColumnSidecar(1))
    require.Equal(t, uint64(0), peerdas.ComputeSubnetForDataColumnSidecar(128))
    require.Equal(t, uint64(1), peerdas.ComputeSubnetForDataColumnSidecar(129))
}

func TestDataColumnSubnets(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    config := params.BeaconConfig()
    config.DataColumnSidecarSubnetCount = 128
    params.OverrideBeaconConfig(config)

    input := map[uint64]bool{0: true, 1: true, 128: true, 129: true, 131: true}
    expected := map[uint64]bool{0: true, 1: true, 3: true}
    actual := peerdas.DataColumnSubnets(input)

    require.Equal(t, len(expected), len(actual))
    for k, v := range expected {
        require.Equal(t, v, actual[k])
    }
}

func TestCustodyGroupCountFromRecord(t *testing.T) {
    t.Run("nil record", func(t *testing.T) {
        _, err := peerdas.CustodyGroupCountFromRecord(nil)
        require.ErrorIs(t, err, peerdas.ErrRecordNil)
    })

    t.Run("no cgc", func(t *testing.T) {
        _, err := peerdas.CustodyGroupCountFromRecord(&enr.Record{})
        require.ErrorIs(t, err, peerdas.ErrCannotLoadCustodyGroupCount)
    })

    t.Run("nominal", func(t *testing.T) {
        const expected uint64 = 7

        record := &enr.Record{}
        record.Set(peerdas.Cgc(expected))

        actual, err := peerdas.CustodyGroupCountFromRecord(record)
        require.NoError(t, err)
        require.Equal(t, expected, actual)
    })
}

func createTestSidecar(t *testing.T, index uint64, column, kzgCommitments, kzgProofs [][]byte) blocks.RODataColumn {
    pbSignedBeaconBlock := util.NewBeaconBlockDeneb()
    signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbSignedBeaconBlock)
    require.NoError(t, err)

    signedBlockHeader, err := signedBeaconBlock.Header()
    require.NoError(t, err)

    sidecar := &ethpb.DataColumnSidecar{
        Index:             index,
        Column:            column,
        KzgCommitments:    kzgCommitments,
        KzgProofs:         kzgProofs,
        SignedBlockHeader: signedBlockHeader,
    }

    roSidecar, err := blocks.NewRODataColumn(sidecar)
    require.NoError(t, err)

    return roSidecar
}
76
beacon-chain/core/peerdas/reconstruction.go
Normal file
@@ -0,0 +1,76 @@
package peerdas

import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v6/config/params"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/pkg/errors"
    "golang.org/x/sync/errgroup"
)

// CanSelfReconstruct returns true if the node can self-reconstruct all the data columns from its custody group count.
func CanSelfReconstruct(custodyGroupCount uint64) bool {
    total := params.BeaconConfig().NumberOfCustodyGroups
    // If total is odd, then we need total / 2 + 1 custody groups to reconstruct.
    // If total is even, then we need total / 2 custody groups to reconstruct.
    return custodyGroupCount >= (total+1)/2
}

// RecoverCellsAndProofs recovers the cells and proofs from the data column sidecars.
func RecoverCellsAndProofs(dataColumnSideCars []*ethpb.DataColumnSidecar) ([]kzg.CellsAndProofs, error) {
    var wg errgroup.Group

    dataColumnSideCarsCount := len(dataColumnSideCars)

    if dataColumnSideCarsCount == 0 {
        return nil, errors.New("no data column sidecars")
    }

    // Check if all columns have the same length.
    blobCount := len(dataColumnSideCars[0].Column)
    for _, sidecar := range dataColumnSideCars {
        length := len(sidecar.Column)

        if length != blobCount {
            return nil, errors.New("columns do not have the same length")
        }
    }

    // Recover cells and compute proofs in parallel.
    recoveredCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)

    for blobIndex := 0; blobIndex < blobCount; blobIndex++ {
        bIndex := blobIndex
        wg.Go(func() error {
            cellsIndices := make([]uint64, 0, dataColumnSideCarsCount)
            cells := make([]kzg.Cell, 0, dataColumnSideCarsCount)

            for _, sidecar := range dataColumnSideCars {
                // Build the cell indices.
                cellsIndices = append(cellsIndices, sidecar.Index)

                // Get the cell.
                column := sidecar.Column
                cell := column[bIndex]

                cells = append(cells, kzg.Cell(cell))
            }

            // Recover the cells and proofs for the corresponding blob.
            cellsAndProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
            if err != nil {
                return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", bIndex)
            }

            recoveredCellsAndProofs[bIndex] = cellsAndProofs
            return nil
        })
    }

    if err := wg.Wait(); err != nil {
        return nil, err
    }

    return recoveredCellsAndProofs, nil
}
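Worked examples of the (total+1)/2 threshold in CanSelfReconstruct, matching the test table below:

    // With NumberOfCustodyGroups == 64: threshold is (64+1)/2 = 32.
    //   CanSelfReconstruct(31) -> false
    //   CanSelfReconstruct(32) -> true
    // With NumberOfCustodyGroups == 65: threshold is (65+1)/2 = 33.
    //   CanSelfReconstruct(32) -> false
    //   CanSelfReconstruct(33) -> true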
57
beacon-chain/core/peerdas/reconstruction_test.go
Normal file
@@ -0,0 +1,57 @@
package peerdas_test

import (
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestCanSelfReconstruct(t *testing.T) {
    testCases := []struct {
        name                       string
        totalNumberOfCustodyGroups uint64
        custodyNumberOfGroups      uint64
        expected                   bool
    }{
        {
            name:                       "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=31",
            totalNumberOfCustodyGroups: 64,
            custodyNumberOfGroups:      31,
            expected:                   false,
        },
        {
            name:                       "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=32",
            totalNumberOfCustodyGroups: 64,
            custodyNumberOfGroups:      32,
            expected:                   true,
        },
        {
            name:                       "totalNumberOfCustodyGroups=65, custodyNumberOfGroups=32",
            totalNumberOfCustodyGroups: 65,
            custodyNumberOfGroups:      32,
            expected:                   false,
        },
        {
            name:                       "totalNumberOfCustodyGroups=65, custodyNumberOfGroups=33",
            totalNumberOfCustodyGroups: 65,
            custodyNumberOfGroups:      33,
            expected:                   true,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            // Set the total number of custody groups.
            params.SetupTestConfigCleanup(t)
            cfg := params.BeaconConfig().Copy()
            cfg.NumberOfCustodyGroups = tc.totalNumberOfCustodyGroups
            params.OverrideBeaconConfig(cfg)

            // Check if reconstruction is possible.
            actual := peerdas.CanSelfReconstruct(tc.custodyNumberOfGroups)
            require.Equal(t, tc.expected, actual)
        })
    }
}
54
beacon-chain/core/peerdas/util.go
Normal file
@@ -0,0 +1,54 @@
package peerdas

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/pkg/errors"
)

// ConstructDataColumnSidecars constructs data column sidecars from a block, blobs and their cell proofs.
// This is a convenience method as blobs and cell proofs are common inputs.
func ConstructDataColumnSidecars(block interfaces.SignedBeaconBlock, blobs [][]byte, cellProofs [][]byte) ([]*ethpb.DataColumnSidecar, error) {
    // Check if the block is at least a Fulu block.
    if block.Version() < version.Fulu {
        return nil, nil
    }

    numberOfColumns := params.BeaconConfig().NumberOfColumns
    if uint64(len(blobs))*numberOfColumns != uint64(len(cellProofs)) {
        return nil, fmt.Errorf("number of blobs and cell proofs do not match: %d * %d != %d", len(blobs), numberOfColumns, len(cellProofs))
    }

    cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobs))

    for i, blob := range blobs {
        var b kzg.Blob
        copy(b[:], blob)
        cells, err := kzg.ComputeCells(&b)
        if err != nil {
            return nil, err
        }

        var proofs []kzg.Proof
        for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
            proofs = append(proofs, kzg.Proof(cellProofs[idx]))
        }

        cellsAndProofs = append(cellsAndProofs, kzg.CellsAndProofs{
            Cells:  cells,
            Proofs: proofs,
        })
    }

    dataColumnSidecars, err := DataColumnSidecars(block, cellsAndProofs)
    if err != nil {
        return nil, errors.Wrap(err, "data column sidecars")
    }

    return dataColumnSidecars, nil
}
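The length check above expects exactly one cell proof per (blob, column) pair: with 128 columns, 3 blobs must arrive with 3 * 128 = 384 cell proofs. A hedged call-site sketch, where `block`, `blobs` and `cellProofs` are assumed inputs (e.g. from an execution-layer response):

    sidecars, err := peerdas.ConstructDataColumnSidecars(block, blobs, cellProofs)
    if err != nil {
        // e.g. "number of blobs and cell proofs do not match: 3 * 128 != 380"
        return err
    }
    // One sidecar per column for Fulu blocks; nil (and no error) for pre-Fulu blocks.
    _ = sidecars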
57
beacon-chain/core/peerdas/utils_test.go
Normal file
@@ -0,0 +1,57 @@
package peerdas_test

import (
    "bytes"
    "crypto/sha256"
    "encoding/binary"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    "github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
    GoKZG "github.com/crate-crypto/go-kzg-4844"
    "github.com/sirupsen/logrus"
)

func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
    commitment, err := kzg.BlobToKZGCommitment(blob)
    if err != nil {
        return nil, nil, err
    }
    proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
    if err != nil {
        return nil, nil, err
    }
    return &commitment, &proof, nil
}

// Returns a random blob using the passed seed as entropy.
func getRandBlob(seed int64) kzg.Blob {
    var blob kzg.Blob
    bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
    for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
        fieldElementBytes := getRandFieldElement(seed + int64(i))
        copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
    }
    return blob
}

// Returns a serialized random field element in big-endian.
func getRandFieldElement(seed int64) [32]byte {
    seedBytes := deterministicRandomness(seed)
    var r fr.Element
    r.SetBytes(seedBytes[:])

    return GoKZG.SerializeScalar(r)
}

func deterministicRandomness(seed int64) [32]byte {
    // Convert the int64 seed to a byte slice.
    buf := new(bytes.Buffer)
    err := binary.Write(buf, binary.BigEndian, seed)
    if err != nil {
        logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
        return [32]byte{}
    }
    seedBytes := buf.Bytes()

    return sha256.Sum256(seedBytes)
}
30
beacon-chain/core/peerdas/validator.go
Normal file
@@ -0,0 +1,30 @@
package peerdas

import (
    beaconState "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/pkg/errors"
)

// ValidatorsCustodyRequirement returns the number of custody groups required for the validator indices attached to the beacon node.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/validator.md#validator-custody
func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
    totalNodeBalance := uint64(0)
    for index := range validatorsIndex {
        validator, err := state.ValidatorAtIndexReadOnly(index)
        if err != nil {
            return 0, errors.Wrapf(err, "validator at index %v", index)
        }

        totalNodeBalance += validator.EffectiveBalance()
    }

    beaconConfig := params.BeaconConfig()
    numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
    validatorCustodyRequirement := beaconConfig.ValidatorCustodyRequirement
    balancePerAdditionalCustodyGroup := beaconConfig.BalancePerAdditionalCustodyGroup

    count := totalNodeBalance / balancePerAdditionalCustodyGroup
    return min(max(count, validatorCustodyRequirement), numberOfCustodyGroup), nil
}
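A worked example of the clamped division above, using the values exercised in validator_test.go (the config constants here are inferred from the test expectations, not shown in this diff):

    // 100 validators, each with a 32 ETH (32,000,000,000 Gwei) effective balance,
    // with BalancePerAdditionalCustodyGroup assumed equal to one such balance:
    count := (100 * uint64(32_000_000_000)) / 32_000_000_000 // == 100
    requirement := min(max(count, uint64(8)), uint64(128))   // == 100; 1 validator -> 8, 1000 -> 128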
55
beacon-chain/core/peerdas/validator_test.go
Normal file
@@ -0,0 +1,55 @@
package peerdas_test

import (
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestValidatorsCustodyRequirement(t *testing.T) {
    testCases := []struct {
        name     string
        count    uint64
        expected uint64
    }{
        {name: "0 validators", count: 0, expected: 8},
        {name: "1 validator", count: 1, expected: 8},
        {name: "8 validators", count: 8, expected: 8},
        {name: "9 validators", count: 9, expected: 9},
        {name: "100 validators", count: 100, expected: 100},
        {name: "128 validators", count: 128, expected: 128},
        {name: "129 validators", count: 129, expected: 128},
        {name: "1000 validators", count: 1000, expected: 128},
    }

    const balance = uint64(32_000_000_000)

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            validators := make([]*ethpb.Validator, 0, tc.count)
            for range tc.count {
                validator := &ethpb.Validator{
                    EffectiveBalance: balance,
                }

                validators = append(validators, validator)
            }

            validatorsIndex := make(map[primitives.ValidatorIndex]bool)
            for i := range tc.count {
                validatorsIndex[primitives.ValidatorIndex(i)] = true
            }

            beaconState, err := state_native.InitializeFromProtoFulu(&ethpb.BeaconStateElectra{Validators: validators})
            require.NoError(t, err)

            actual, err := peerdas.ValidatorsCustodyRequirement(beaconState, validatorsIndex)
            require.NoError(t, err)
            require.Equal(t, tc.expected, actual)
        })
    }
}
@@ -59,7 +59,7 @@ go_library(
        "//runtime/version:go_default_library",
        "//time:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_dgraph_io_ristretto//:go_default_library",
        "@com_github_dgraph_io_ristretto_v2//:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_golang_snappy//:go_default_library",

@@ -40,7 +40,7 @@ func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadO

func (s *Store) getBlock(ctx context.Context, blockRoot [32]byte, tx *bolt.Tx) (interfaces.ReadOnlySignedBeaconBlock, error) {
    if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
        return v.(interfaces.ReadOnlySignedBeaconBlock), nil
        return v, nil
    }
    // This method allows the caller to pass in its tx if one is already open.
    // Or if a nil value is used, a transaction will be managed internally.

@@ -13,8 +13,10 @@ import (
    "github.com/OffchainLabs/prysm/v6/config/features"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/io/file"
    "github.com/dgraph-io/ristretto"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/dgraph-io/ristretto/v2"
    "github.com/pkg/errors"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"

@@ -86,8 +88,8 @@ var blockedBuckets = [][]byte{
type Store struct {
    db                  *bolt.DB
    databasePath        string
    blockCache          *ristretto.Cache
    validatorEntryCache *ristretto.Cache
    blockCache          *ristretto.Cache[string, interfaces.ReadOnlySignedBeaconBlock]
    validatorEntryCache *ristretto.Cache[[]byte, *ethpb.Validator]
    stateSummaryCache   *stateSummaryCache
    ctx                 context.Context
}

@@ -156,7 +158,7 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
        return nil, err
    }
    boltDB.AllocSize = boltAllocSize
    blockCache, err := ristretto.NewCache(&ristretto.Config{
    blockCache, err := ristretto.NewCache(&ristretto.Config[string, interfaces.ReadOnlySignedBeaconBlock]{
        NumCounters: 1000,           // number of keys to track frequency of (1000).
        MaxCost:     BlockCacheSize, // maximum cost of cache (1000 Blocks).
        BufferItems: 64,             // number of keys per Get buffer.

@@ -165,7 +167,7 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
        return nil, err
    }

    validatorCache, err := ristretto.NewCache(&ristretto.Config{
    validatorCache, err := ristretto.NewCache(&ristretto.Config[[]byte, *ethpb.Validator]{
        NumCounters: NumOfValidatorEntries, // number of entries in cache (2 Million).
        MaxCost:     ValidatorEntryMaxCost, // maximum size of the cache (64Mb).
        BufferItems: 64,                    // number of keys per Get buffer.
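The move to ristretto v2's generic API is what removes the type assertions in the hunks below: Get now returns a typed value directly. A minimal self-contained sketch of the typed cache, with illustrative cost/geometry values rather than the store's real ones:

    package main

    import (
        "fmt"

        "github.com/dgraph-io/ristretto/v2"
    )

    func main() {
        // ristretto v2 caches are parameterized over key and value types.
        cache, err := ristretto.NewCache(&ristretto.Config[string, int]{
            NumCounters: 1000, // keys to track frequency of
            MaxCost:     100,  // total cost budget
            BufferItems: 64,   // keys per Get buffer
        })
        if err != nil {
            panic(err)
        }

        cache.Set("answer", 42, 1) // store with cost 1
        cache.Wait()               // wait for the set buffer to drain

        if v, ok := cache.Get("answer"); ok {
            fmt.Println(v + 1) // v is already an int; no type assertion needed
        }
    }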
||||

@@ -744,14 +744,9 @@ func (s *Store) validatorEntries(ctx context.Context, blockRoot [32]byte) ([]*et
// get the entry bytes from the cache or from the DB.
v, ok := s.validatorEntryCache.Get(key)
if ok {
valEntry, vType := v.(*ethpb.Validator)
if vType {
validatorEntries = append(validatorEntries, valEntry)
validatorEntryCacheHit.Inc()
} else {
// this should never happen, but anyway it's good to bail out if one happens.
return errors.New("validator cache does not have proper object type")
}
valEntry := v
validatorEntries = append(validatorEntries, valEntry)
validatorEntryCacheHit.Inc()
} else {
// not in cache, so get it from the DB, decode it and add to the entry list.
valEntryBytes := valBkt.Get(key)

@@ -321,15 +321,11 @@ func TestState_CanSaveRetrieveValidatorEntriesFromCache(t *testing.T) {
hash, hashErr := stateValidators[i].HashTreeRoot()
assert.NoError(t, hashErr)

data, ok := db.validatorEntryCache.Get(string(hash[:]))
data, ok := db.validatorEntryCache.Get(hash[:])
assert.Equal(t, true, ok)
require.NotNil(t, data)

valEntry, vType := data.(*ethpb.Validator)
assert.Equal(t, true, vType)
require.NotNil(t, valEntry)

require.DeepSSZEqual(t, stateValidators[i], valEntry, "validator entry is not matching")
require.DeepSSZEqual(t, stateValidators[i], data, "validator entry is not matching")
}

// check if all the validator entries are still intact in the validator entry bucket.

@@ -447,7 +443,7 @@ func TestState_DeleteState(t *testing.T) {
assert.NoError(t, hashErr)
v, found := db.validatorEntryCache.Get(hash[:])
require.Equal(t, false, found)
require.Equal(t, nil, v)
require.IsNil(t, v)
}

// check if the index of the first state is deleted.

@@ -865,6 +865,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
regularsync.WithAvailableBlocker(bFillStore),
regularsync.WithSlasherEnabled(b.slasherEnabled),
regularsync.WithLightClientStore(b.lcStore),
)
return b.services.RegisterService(rs)
}

@@ -1031,6 +1032,7 @@ func (b *BeaconNode) registerPrometheusService(_ *cli.Context) error {
}

service := prometheus.NewService(
b.cliCtx.Context,
fmt.Sprintf("%s:%d", b.cliCtx.String(cmd.MonitoringHostFlag.Name), b.cliCtx.Int(flags.MonitoringPortFlag.Name)),
b.services,
additionalHandlers...,

@@ -285,13 +285,13 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
return err
}

// TODO: should we check if the update is too early or too late to broadcast?

if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(forkDigest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
err := errors.Wrap(err, "could not publish message")
tracing.AnnotateError(span, err)
return err
}
log.Debug("Successfully broadcast light client optimistic update")

return nil
}

@@ -311,13 +311,13 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
return err
}

// TODO: should we check if the update is too early or too late to broadcast?

if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client finality update")
err := errors.Wrap(err, "could not publish message")
tracing.AnnotateError(span, err)
return err
}
log.Debug("Successfully broadcast light client finality update")

return nil
}

@@ -44,6 +44,12 @@ const (
// blsToExecutionChangeWeight specifies the scoring weight that we apply to
// our bls to execution topic.
blsToExecutionChangeWeight = 0.05
// lightClientOptimisticUpdateWeight specifies the scoring weight that we apply to
// our light client optimistic update topic.
lightClientOptimisticUpdateWeight = 0.05
// lightClientFinalityUpdateWeight specifies the scoring weight that we apply to
// our light client finality update topic.
lightClientFinalityUpdateWeight = 0.05

// maxInMeshScore describes the max score a peer can attain from being in the mesh.
maxInMeshScore = 10

@@ -124,6 +130,10 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
case strings.Contains(topic, GossipBlobSidecarMessage):
// TODO(Deneb): Using the default block scoring. But this should be updated.
return defaultBlockTopicParams(), nil
case strings.Contains(topic, GossipLightClientOptimisticUpdateMessage):
return defaultLightClientOptimisticUpdateTopicParams(), nil
case strings.Contains(topic, GossipLightClientFinalityUpdateMessage):
return defaultLightClientFinalityUpdateTopicParams(), nil
default:
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
}

@@ -503,6 +513,50 @@ func defaultBlsToExecutionChangeTopicParams() *pubsub.TopicScoreParams {
}
}

func defaultLightClientOptimisticUpdateTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: lightClientOptimisticUpdateWeight,
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
TimeInMeshQuantum: inMeshTime(),
TimeInMeshCap: inMeshCap(),
FirstMessageDeliveriesWeight: 2,
FirstMessageDeliveriesDecay: scoreDecay(oneHundredEpochs),
FirstMessageDeliveriesCap: 5,
MeshMessageDeliveriesWeight: 0,
MeshMessageDeliveriesDecay: 0,
MeshMessageDeliveriesCap: 0,
MeshMessageDeliveriesThreshold: 0,
MeshMessageDeliveriesWindow: 0,
MeshMessageDeliveriesActivation: 0,
MeshFailurePenaltyWeight: 0,
MeshFailurePenaltyDecay: 0,
InvalidMessageDeliveriesWeight: -2000,
InvalidMessageDeliveriesDecay: scoreDecay(invalidDecayPeriod),
}
}

func defaultLightClientFinalityUpdateTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: lightClientFinalityUpdateWeight,
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
TimeInMeshQuantum: inMeshTime(),
TimeInMeshCap: inMeshCap(),
FirstMessageDeliveriesWeight: 2,
FirstMessageDeliveriesDecay: scoreDecay(oneHundredEpochs),
FirstMessageDeliveriesCap: 5,
MeshMessageDeliveriesWeight: 0,
MeshMessageDeliveriesDecay: 0,
MeshMessageDeliveriesCap: 0,
MeshMessageDeliveriesThreshold: 0,
MeshMessageDeliveriesWindow: 0,
MeshMessageDeliveriesActivation: 0,
MeshFailurePenaltyWeight: 0,
MeshFailurePenaltyDecay: 0,
InvalidMessageDeliveriesWeight: -2000,
InvalidMessageDeliveriesDecay: scoreDecay(invalidDecayPeriod),
}
}

func oneSlotDuration() time.Duration {
return time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
}
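
The two parameter sets added above follow the shape Prysm uses for other low-volume gossip topics: mesh-delivery scoring is zeroed out, so only first-delivery credit and invalid-message penalties move a peer's score. A standalone sketch of constructing such params with go-libp2p-pubsub; the numeric values here are illustrative stand-ins for Prysm's computed inMeshTime/inMeshCap/scoreDecay, not the real ones:

package main

import (
	"fmt"
	"time"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

func lightClientTopicParams() *pubsub.TopicScoreParams {
	return &pubsub.TopicScoreParams{
		TopicWeight:                    0.05,             // lightClient*Weight above
		TimeInMeshWeight:               10.0 / 300,       // maxInMeshScore / inMeshCap
		TimeInMeshQuantum:              12 * time.Second, // one slot
		TimeInMeshCap:                  300,
		FirstMessageDeliveriesWeight:   2,
		FirstMessageDeliveriesDecay:    0.99, // stand-in for scoreDecay(oneHundredEpochs)
		FirstMessageDeliveriesCap:      5,
		InvalidMessageDeliveriesWeight: -2000,
		InvalidMessageDeliveriesDecay:  0.99, // stand-in for scoreDecay(invalidDecayPeriod)
	}
}

func main() {
	// In a real node these params are registered per topic, for example via
	// (*pubsub.Topic).SetScoreParams after joining the topic.
	fmt.Printf("%+v\n", lightClientTopicParams())
}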

@@ -75,4 +75,6 @@ func TestLoggingParameters(_ *testing.T) {
logGossipParameters("testing", defaultAttesterSlashingTopicParams())
logGossipParameters("testing", defaultProposerSlashingTopicParams())
logGossipParameters("testing", defaultVoluntaryExitTopicParams())
logGossipParameters("testing", defaultLightClientOptimisticUpdateTopicParams())
logGossipParameters("testing", defaultLightClientFinalityUpdateTopicParams())
}

@@ -73,6 +73,12 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, altairForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(LightClientOptimisticUpdateTopicFormat, altairForkEpoch)
_, ok = pMessage.(*ethpb.LightClientOptimisticUpdateAltair)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(LightClientFinalityUpdateTopicFormat, altairForkEpoch)
_, ok = pMessage.(*ethpb.LightClientFinalityUpdateAltair)
assert.Equal(t, true, ok)

// Bellatrix Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, bellatrixForkEpoch)

@@ -87,6 +93,12 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, bellatrixForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(LightClientOptimisticUpdateTopicFormat, bellatrixForkEpoch)
_, ok = pMessage.(*ethpb.LightClientOptimisticUpdateAltair)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(LightClientFinalityUpdateTopicFormat, bellatrixForkEpoch)
_, ok = pMessage.(*ethpb.LightClientFinalityUpdateAltair)
assert.Equal(t, true, ok)

// Capella Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, capellaForkEpoch)

@@ -101,6 +113,12 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, capellaForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(LightClientOptimisticUpdateTopicFormat, capellaForkEpoch)
_, ok = pMessage.(*ethpb.LightClientOptimisticUpdateCapella)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(LightClientFinalityUpdateTopicFormat, capellaForkEpoch)
_, ok = pMessage.(*ethpb.LightClientFinalityUpdateCapella)
assert.Equal(t, true, ok)

// Deneb Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, denebForkEpoch)

@@ -115,6 +133,12 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, denebForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(LightClientOptimisticUpdateTopicFormat, denebForkEpoch)
_, ok = pMessage.(*ethpb.LightClientOptimisticUpdateDeneb)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(LightClientFinalityUpdateTopicFormat, denebForkEpoch)
_, ok = pMessage.(*ethpb.LightClientFinalityUpdateDeneb)
assert.Equal(t, true, ok)

// Electra Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, electraForkEpoch)

@@ -129,4 +153,10 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProofElectra)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(LightClientOptimisticUpdateTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.LightClientOptimisticUpdateDeneb)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(LightClientFinalityUpdateTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.LightClientFinalityUpdateElectra)
assert.Equal(t, true, ok)
}

@@ -5,6 +5,7 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
"github.com/ethereum/go-ethereum/p2p/enr"

@@ -36,6 +37,8 @@ type Broadcaster interface {
BroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att) error
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
}

// SetStreamHandler configures p2p to handle streams of a certain topic ID.

@@ -20,6 +20,7 @@ go_library(
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
"//testing/require:go_default_library",

@@ -5,6 +5,7 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
"github.com/ethereum/go-ethereum/p2p/enr"

@@ -148,6 +149,16 @@ func (*FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar)
return nil
}

// BroadcastLightClientOptimisticUpdate -- fake.
func (*FakeP2P) BroadcastLightClientOptimisticUpdate(_ context.Context, _ interfaces.LightClientOptimisticUpdate) error {
return nil
}

// BroadcastLightClientFinalityUpdate -- fake.
func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfaces.LightClientFinalityUpdate) error {
return nil
}

// InterceptPeerDial -- fake.
func (*FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
return true

@@ -5,6 +5,7 @@ import (
"sync"
"sync/atomic"

"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"google.golang.org/protobuf/proto"
)

@@ -48,6 +49,18 @@ func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.BlobSide
return nil
}

// BroadcastLightClientOptimisticUpdate records a broadcast occurred.
func (m *MockBroadcaster) BroadcastLightClientOptimisticUpdate(_ context.Context, _ interfaces.LightClientOptimisticUpdate) error {
m.BroadcastCalled.Store(true)
return nil
}

// BroadcastLightClientFinalityUpdate records a broadcast occurred.
func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfaces.LightClientFinalityUpdate) error {
m.BroadcastCalled.Store(true)
return nil
}

// NumMessages returns the number of messages broadcasted.
func (m *MockBroadcaster) NumMessages() int {
m.msgLock.Lock()
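
The fakes and mocks above all use the same recording pattern: flip an atomic flag so tests can assert that a broadcast happened without extra locking. A self-contained sketch of that pattern with local stand-in types:

package main

import (
	"fmt"
	"sync/atomic"
)

type mockBroadcaster struct {
	broadcastCalled atomic.Bool
}

// BroadcastLightClientFinalityUpdate records that a broadcast occurred.
func (m *mockBroadcaster) BroadcastLightClientFinalityUpdate() error {
	m.broadcastCalled.Store(true)
	return nil
}

func main() {
	m := &mockBroadcaster{}
	_ = m.BroadcastLightClientFinalityUpdate()
	fmt.Println(m.broadcastCalled.Load()) // true
}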

@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
"github.com/OffchainLabs/prysm/v6/testing/require"

@@ -207,6 +208,18 @@ func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.BlobSidecar) err
return nil
}

// BroadcastLightClientOptimisticUpdate broadcasts an optimistic update for mock.
func (p *TestP2P) BroadcastLightClientOptimisticUpdate(_ context.Context, _ interfaces.LightClientOptimisticUpdate) error {
p.BroadcastCalled.Store(true)
return nil
}

// BroadcastLightClientFinalityUpdate broadcasts a finality update for mock.
func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfaces.LightClientFinalityUpdate) error {
p.BroadcastCalled.Store(true)
return nil
}

// SetStreamHandler for RPC.
func (p *TestP2P) SetStreamHandler(topic string, handler network.StreamHandler) {
p.BHost.SetStreamHandler(protocol.ID(topic), handler)

@@ -21,6 +21,7 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//encoding/bytesutil:go_default_library",

@@ -4,6 +4,7 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
lightclientConsensusTypes "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"

@@ -36,6 +37,12 @@ var (
// AttesterSlashingMap maps the fork-version to the underlying data type for that particular
// fork period.
AttesterSlashingMap map[[4]byte]func() (ethpb.AttSlashing, error)
// LightClientOptimisticUpdateMap maps the fork-version to the underlying data type for that
// particular fork period.
LightClientOptimisticUpdateMap map[[4]byte]func() (interfaces.LightClientOptimisticUpdate, error)
// LightClientFinalityUpdateMap maps the fork-version to the underlying data type for that
// particular fork period.
LightClientFinalityUpdateMap map[[4]byte]func() (interfaces.LightClientFinalityUpdate, error)
)

// InitializeDataMaps initializes all the relevant object maps. This function is called to

@@ -179,4 +186,42 @@ func InitializeDataMaps() {
return &ethpb.AttesterSlashingElectra{}, nil
},
}

// Reset our light client optimistic update map.
LightClientOptimisticUpdateMap = map[[4]byte]func() (interfaces.LightClientOptimisticUpdate, error){
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateAltair(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateAltair(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateCapella(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
},
}

// Reset our light client finality update map.
LightClientFinalityUpdateMap = map[[4]byte]func() (interfaces.LightClientFinalityUpdate, error){
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateAltair(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateAltair(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateCapella(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateDeneb(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
},
}
}
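
The maps above implement a fork-version keyed registry: a 4-byte fork version selects a constructor for the right concrete update type, so gossip decoding can follow whichever fork a digest resolves to. A minimal standalone sketch of the pattern with local stand-in types:

package main

import "fmt"

type update interface{ Fork() string }

type altairUpdate struct{}

func (altairUpdate) Fork() string { return "altair" }

// registry mirrors the shape of LightClientOptimisticUpdateMap:
// fork version -> constructor for that fork's concrete type.
var registry = map[[4]byte]func() update{
	{1, 0, 0, 0}: func() update { return altairUpdate{} },
}

func main() {
	if ctor, ok := registry[[4]byte{1, 0, 0, 0}]; ok {
		fmt.Println(ctor().Fork()) // altair
	}
}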

@@ -77,8 +77,9 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
return nil, err
}

// If the StaticPeerID flag is not set and if peerDAS is not enabled, return the private key.
if !(cfg.StaticPeerID || params.PeerDASEnabled()) {
// If the StaticPeerID flag is not set and the Fulu epoch is not set, return the private key.
// Starting at Fulu, we don't want to generate a new key every time, to avoid custody columns changes.
if !(cfg.StaticPeerID || params.FuluEnabled()) {
return ecdsaprysm.ConvertFromInterfacePrivKey(priv)
}
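
Once the Fulu epoch is configured, the node must keep a stable identity, since a fresh key changes its peer ID and therefore its custody column assignment. A hedged sketch of persisting a libp2p key for that purpose; the file path and permissions are assumptions, not Prysm's actual layout:

package main

import (
	"crypto/rand"
	"fmt"
	"os"

	"github.com/libp2p/go-libp2p/core/crypto"
)

// loadOrCreateKey returns the key stored at path, generating and persisting
// one on first run so the derived peer ID stays stable across restarts.
func loadOrCreateKey(path string) (crypto.PrivKey, error) {
	if raw, err := os.ReadFile(path); err == nil {
		return crypto.UnmarshalPrivateKey(raw)
	}
	priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	if err != nil {
		return nil, err
	}
	raw, err := crypto.MarshalPrivateKey(priv)
	if err != nil {
		return nil, err
	}
	return priv, os.WriteFile(path, raw, 0o600)
}

func main() {
	key, err := loadOrCreateKey("network-key") // hypothetical path
	if err != nil {
		panic(err)
	}
	fmt.Println(key.Type())
}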

@@ -66,7 +66,7 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promhttp:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//plugin/ocgrpc:go_default_library",
"@io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//credentials:go_default_library",
"@org_golang_google_grpc//peer:go_default_library",

@@ -200,7 +200,7 @@ func TestGetSpec(t *testing.T) {
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)

assert.Equal(t, 169, len(data))
assert.Equal(t, 175, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {

@@ -545,6 +545,18 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "9", v)
case "MAX_REQUEST_BLOB_SIDECARS_ELECTRA":
assert.Equal(t, "1152", v)
case "NUMBER_OF_CUSTODY_GROUPS":
assert.Equal(t, "128", v)
case "BALANCE_PER_ADDITIONAL_CUSTODY_GROUP":
assert.Equal(t, "32000000000", v)
case "CUSTODY_REQUIREMENT":
assert.Equal(t, "4", v)
case "SAMPLES_PER_SLOT":
assert.Equal(t, "8", v)
case "VALIDATOR_CUSTODY_REQUIREMENT":
assert.Equal(t, "8", v)
case "MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS":
assert.Equal(t, "4096", v)
case "MAX_BLOB_COMMITMENTS_PER_BLOCK":
assert.Equal(t, "95", v)
case "MAX_BYTES_PER_TRANSACTION":

@@ -559,6 +571,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "100", v)
case "KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":
assert.Equal(t, "101", v)
case "MAX_BLOBS_PER_BLOCK_FULU":
assert.Equal(t, "12", v)
case "BLOB_SIDECAR_SUBNET_COUNT":
assert.Equal(t, "102", v)
case "BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":

@@ -37,7 +37,6 @@ import (
)

const DefaultEventFeedDepth = 1000
const payloadAttributeTimeout = 2 * time.Second

const (
InvalidTopic = "__invalid__"

@@ -627,6 +626,7 @@ func (s *Server) lazyReaderForEvent(ctx context.Context, event *feed.Event, topi
}

var errUnsupportedPayloadAttribute = errors.New("cannot compute payload attributes pre-Bellatrix")
var errPayloadAttributeExpired = errors.New("skipping payload attribute event for past slot")

func (s *Server) computePayloadAttributes(ctx context.Context, st state.ReadOnlyBeaconState, root [32]byte, proposer primitives.ValidatorIndex, timestamp uint64, randao []byte) (payloadattribute.Attributer, error) {
v := st.Version()

@@ -711,7 +711,7 @@ func (s *Server) fillEventData(ctx context.Context, ev payloadattribute.EventDat
return ev, errors.Wrap(err, "could not get head state")
}
// double check that we need to process_slots, just in case we got here via a hot state cache miss.
if slots.ToEpoch(st.Slot()) == pse {
if slots.ToEpoch(st.Slot()) < pse {
start, err := slots.EpochStart(pse)
if err != nil {
return ev, errors.Wrap(err, "invalid state slot; could not compute epoch start")

@@ -750,7 +750,11 @@ func (s *Server) fillEventData(ctx context.Context, ev payloadattribute.EventDat
// This event stream is intended to be used by builders and relays.
// Parent fields are based on state at N_{current_slot}, while the rest of fields are based on state of N_{current_slot + 1}
func (s *Server) payloadAttributesReader(ctx context.Context, ev payloadattribute.EventData) (lazyReader, error) {
ctx, cancel := context.WithTimeout(ctx, payloadAttributeTimeout)
deadline := slots.BeginsAt(ev.ProposalSlot, s.ChainInfoFetcher.GenesisTime())
if deadline.Before(time.Now()) {
return nil, errors.Wrapf(errPayloadAttributeExpired, "proposal slot time %d", deadline.Unix())
}
ctx, cancel := context.WithDeadline(ctx, deadline)
edc := make(chan asyncPayloadAttrData)
go func() {
d := asyncPayloadAttrData{}
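
The hunk above replaces a fixed two-second timeout with a deadline at the proposal slot's start: attributes for a slot that has already begun are useless to builders, so the context should expire exactly then. A standalone sketch of that deadline logic, with mainnet-style numbers hard-coded for illustration:

package main

import (
	"context"
	"fmt"
	"time"
)

// slotStart mirrors the role of slots.BeginsAt: genesis plus slot * seconds-per-slot.
func slotStart(genesis time.Time, slot, secondsPerSlot uint64) time.Time {
	return genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
}

func main() {
	genesis := time.Now().Add(-10 * time.Second)
	deadline := slotStart(genesis, 2, 12) // slot 2 starts 24s after genesis

	// Mirror of the new guard: a slot already in the past is skipped outright.
	if deadline.Before(time.Now()) {
		fmt.Println("skipping payload attribute event for past slot")
		return
	}

	// Otherwise the work is bounded by the slot start, not a fixed timeout.
	ctx, cancel := context.WithDeadline(context.Background(), deadline)
	defer cancel()
	<-ctx.Done()
	fmt.Println("deadline reached:", ctx.Err())
}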

@@ -523,11 +523,14 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
// to avoid slot processing
require.NoError(t, st.SetSlot(currentSlot+1))
b := tc.getBlock()
genesis := time.Now()
require.NoError(t, st.SetGenesisTime(uint64(genesis.Unix())))
mockChainService := &mockChain.ChainService{
Root: make([]byte, 32),
State: st,
Block: b,
Slot: &currentSlot,
Root: make([]byte, 32),
State: st,
Block: b,
Slot: &currentSlot,
Genesis: genesis,
}
headRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)

@@ -557,7 +560,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
Type: statefeed.PayloadAttributes,
Data: payloadattribute.EventData{
ProposerIndex: 0,
ProposalSlot: 0,
ProposalSlot: mockChainService.CurrentSlot() + 1,
ParentBlockNumber: 0,
ParentBlockHash: make([]byte, 32),
HeadBlock: b,

@@ -103,7 +103,6 @@ go_library(
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",

@@ -9,7 +9,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"

@@ -67,6 +66,7 @@ func (vs *Server) StreamSlots(req *ethpb.StreamSlotsRequest, stream ethpb.Beacon
select {
case ev := <-ch:
var s primitives.Slot
var currDependentRoot, prevDependentRoot [32]byte
if req.VerifiedOnly {
if ev.Type != statefeed.BlockProcessed {
continue

@@ -76,6 +76,8 @@ func (vs *Server) StreamSlots(req *ethpb.StreamSlotsRequest, stream ethpb.Beacon
continue
}
s = data.Slot
currDependentRoot = data.CurrDependentRoot
prevDependentRoot = data.PrevDependentRoot
} else {
if ev.Type != blockfeed.ReceivedBlock {
continue

@@ -85,24 +87,14 @@ func (vs *Server) StreamSlots(req *ethpb.StreamSlotsRequest, stream ethpb.Beacon
continue
}
s = data.SignedBlock.Block().Slot()
}
currEpoch := slots.ToEpoch(s)
currDepRoot, err := vs.ForkchoiceFetcher.DependentRoot(currEpoch)
if err != nil {
return status.Errorf(codes.Internal, "Could not get dependent root: %v", err)
}
prevDepRoot := currDepRoot
if currEpoch > 0 {
prevDepRoot, err = vs.ForkchoiceFetcher.DependentRoot(currEpoch - 1)
if err != nil {
return status.Errorf(codes.Internal, "Could not get dependent root: %v", err)
}
currDependentRoot = data.CurrDependentRoot
prevDependentRoot = data.PrevDependentRoot
}
if err := stream.Send(
&ethpb.StreamSlotsResponse{
Slot: s,
PreviousDutyDependentRoot: prevDepRoot[:],
CurrentDutyDependentRoot: currDepRoot[:],
PreviousDutyDependentRoot: prevDependentRoot[:],
CurrentDutyDependentRoot: currDependentRoot[:],
}); err != nil {
return status.Errorf(codes.Unavailable, "Could not send over stream: %v", err)
}
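
The StreamSlots rewrite above stops recomputing duty dependent roots from forkchoice on every event; the producer computes them once and attaches them to the block-processed event, and stream handlers just copy them out. A small sketch of that event-carried-data shape; the struct here is a local stand-in, with field names mirroring the diff:

package main

import "fmt"

// blockProcessedData stands in for the state feed payload: both dependent
// roots travel with the event instead of being fetched per subscriber.
type blockProcessedData struct {
	Slot              uint64
	CurrDependentRoot [32]byte
	PrevDependentRoot [32]byte
}

func main() {
	data := blockProcessedData{Slot: 42, CurrDependentRoot: [32]byte{1}, PrevDependentRoot: [32]byte{2}}
	currDependentRoot := data.CurrDependentRoot
	prevDependentRoot := data.PrevDependentRoot
	fmt.Println(data.Slot, currDependentRoot[0], prevDependentRoot[0])
}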

@@ -9,6 +9,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
"google.golang.org/grpc/codes"

@@ -80,9 +81,11 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
return nil, status.Errorf(codes.Internal, "Could not compute proposer slots: %v", err)
}

ctx, span := trace.StartSpan(ctx, "getDuties.BuildResponse")
defer span.End()

validatorAssignments := make([]*ethpb.DutiesResponse_Duty, 0, len(req.PublicKeys))
nextValidatorAssignments := make([]*ethpb.DutiesResponse_Duty, 0, len(req.PublicKeys))

for _, pubKey := range req.PublicKeys {
if ctx.Err() != nil {
return nil, status.Errorf(codes.Aborted, "Could not continue fetching assignments: %v", ctx.Err())

@@ -47,7 +47,7 @@ import (
grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/plugin/ocgrpc"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"

@@ -146,7 +146,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
log.WithField("address", address).Info("gRPC server listening on port")

opts := []grpc.ServerOption{
grpc.StatsHandler(&ocgrpc.ServerHandler{}),
grpc.StatsHandler(otelgrpc.NewServerHandler()),
grpc.StreamInterceptor(middleware.ChainStreamServer(
recovery.StreamServerInterceptor(
recovery.WithRecoveryHandlerContext(tracing.RecoveryHandlerFunc),

@@ -68,7 +68,7 @@ type ReadOnlyBeaconState interface {
Slot() primitives.Slot
Fork() *ethpb.Fork
LatestBlockHeader() *ethpb.BeaconBlockHeader
HistoricalRoots() ([][]byte, error)
HistoricalRoots() [][]byte
HistoricalSummaries() ([]*ethpb.HistoricalSummary, error)
Slashings() []uint64
FieldReferencesCount() map[string]uint64

@@ -73,15 +73,15 @@ func (b *BeaconState) forkVal() *ethpb.Fork {
}

// HistoricalRoots based on epochs stored in the beacon state.
func (b *BeaconState) HistoricalRoots() ([][]byte, error) {
func (b *BeaconState) HistoricalRoots() [][]byte {
if b.historicalRoots == nil {
return nil, nil
return nil
}

b.lock.RLock()
defer b.lock.RUnlock()

return b.historicalRoots.Slice(), nil
return b.historicalRoots.Slice()
}

// HistoricalSummaries of the beacon state.
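
Since the accessor can never actually fail (a nil field simply yields nil), the error return carried no information, and dropping it simplifies every call site. A tiny standalone sketch of the before/after with a stand-in state type:

package main

import "fmt"

type state struct{ historicalRoots [][]byte }

// HistoricalRoots no longer returns an error: nil input simply yields nil.
func (s *state) HistoricalRoots() [][]byte { return s.historicalRoots }

func main() {
	st := &state{historicalRoots: [][]byte{{0x01}}}
	hr := st.HistoricalRoots() // previously: hr, err := st.HistoricalRoots()
	fmt.Println(len(hr))       // 1
}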

@@ -118,44 +118,12 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
epoch := slots.ToEpoch(b.slot)

// Electra partial withdrawals functionality.
var processedPartialWithdrawalsCount uint64
processedPartialWithdrawalsCount := uint64(0)
var err error
if b.version >= version.Electra {
for _, w := range b.pendingPartialWithdrawals {
if w.WithdrawableEpoch > epoch || len(withdrawals) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
break
}

v, err := b.validatorAtIndexReadOnly(w.Index)
if err != nil {
return nil, 0, fmt.Errorf("failed to determine withdrawals at index %d: %w", w.Index, err)
}
vBal, err := b.balanceAtIndex(w.Index)
if err != nil {
return nil, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
}
hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
var totalWithdrawn uint64
for _, wi := range withdrawals {
if wi.ValidatorIndex == w.Index {
totalWithdrawn += wi.Amount
}
}
balance, err := mathutil.Sub64(vBal, totalWithdrawn)
if err != nil {
return nil, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
}
hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
withdrawals = append(withdrawals, &enginev1.Withdrawal{
Index: withdrawalIndex,
ValidatorIndex: w.Index,
Address: v.GetWithdrawalCredentials()[12:],
Amount: amount,
})
withdrawalIndex++
}
processedPartialWithdrawalsCount++
withdrawals, withdrawalIndex, processedPartialWithdrawalsCount, err = b.processPendingPartialWithdrawals(withdrawals)
if err != nil {
return nil, 0, err
}
}

@@ -171,12 +139,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
return nil, 0, errors.Wrapf(err, "could not retrieve balance at index %d", validatorIndex)
}
if b.version >= version.Electra {
var partiallyWithdrawnBalance uint64
for _, w := range withdrawals {
if w.ValidatorIndex == validatorIndex {
partiallyWithdrawnBalance += w.Amount
}
}
partiallyWithdrawnBalance := withdrawalTotal(withdrawals, validatorIndex)
balance, err = mathutil.Sub64(balance, partiallyWithdrawnBalance)
if err != nil {
return nil, 0, errors.Wrapf(err, "could not subtract balance %d with partial withdrawn balance %d", balance, partiallyWithdrawnBalance)

@@ -211,6 +174,54 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
return withdrawals, processedPartialWithdrawalsCount, nil
}

func withdrawalTotal(ws []*enginev1.Withdrawal, idx primitives.ValidatorIndex) (total uint64) {
for _, w := range ws {
if w.ValidatorIndex == idx {
total += w.Amount
}
}
return
}

func (b *BeaconState) processPendingPartialWithdrawals(withdrawals []*enginev1.Withdrawal) ([]*enginev1.Withdrawal, uint64, uint64, error) {
withdrawalIndex := b.nextWithdrawalIndex
epoch := slots.ToEpoch(b.slot)
processedPartialWithdrawalsCount := uint64(0)
for _, w := range b.pendingPartialWithdrawals {
if w.WithdrawableEpoch > epoch || len(withdrawals) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
break
}

v, err := b.validatorAtIndexReadOnly(w.Index)
if err != nil {
return nil, withdrawalIndex, 0, fmt.Errorf("failed to determine withdrawals at index %d: %w", w.Index, err)
}
vBal, err := b.balanceAtIndex(w.Index)
if err != nil {
return nil, withdrawalIndex, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
}
hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
totalWithdrawn := withdrawalTotal(withdrawals, w.Index)
balance, err := mathutil.Sub64(vBal, totalWithdrawn)
if err != nil {
return nil, withdrawalIndex, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
}
hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
withdrawals = append(withdrawals, &enginev1.Withdrawal{
Index: withdrawalIndex,
ValidatorIndex: w.Index,
Address: v.GetWithdrawalCredentials()[12:],
Amount: amount,
})
withdrawalIndex++
}
processedPartialWithdrawalsCount++
}
return withdrawals, withdrawalIndex, processedPartialWithdrawalsCount, nil
}

func (b *BeaconState) PendingPartialWithdrawals() ([]*ethpb.PendingPartialWithdrawal, error) {
if b.version < version.Electra {
return nil, errNotSupported("PendingPartialWithdrawals", b.version)
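
The extracted withdrawalTotal helper simply sums what is already queued for one validator, logic that both call sites previously inlined. A standalone sketch with local stand-ins for the engine API's Withdrawal type:

package main

import "fmt"

type withdrawal struct {
	ValidatorIndex uint64
	Amount         uint64
}

// withdrawalTotal mirrors the helper above: the total amount already queued
// for a single validator index.
func withdrawalTotal(ws []withdrawal, idx uint64) (total uint64) {
	for _, w := range ws {
		if w.ValidatorIndex == idx {
			total += w.Amount
		}
	}
	return
}

func main() {
	ws := []withdrawal{{1, 5}, {2, 7}, {1, 3}}
	fmt.Println(withdrawalTotal(ws, 1)) // 8
}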

@@ -39,7 +39,7 @@ const (
// This specifies the limit till which we process all dirty indices for a certain field.
// If we have more dirty indices than the threshold, then we rebuild the whole trie. This
// comes due to the fact that O(a log n) > O(n) beyond a certain value of a.
indicesLimit = 8000
indicesLimit = 20000
)

// SetGenesisTime for the beacon state.

@@ -400,11 +400,11 @@ func TestDuplicateDirtyIndices(t *testing.T) {
newState.dirtyIndices[types.Balances] = append(newState.dirtyIndices[types.Balances], []uint64{0, 1, 2, 3, 4}...)

// We would remove the duplicates and stay under the threshold
newState.addDirtyIndices(types.Balances, []uint64{9997, 9998})
newState.addDirtyIndices(types.Balances, []uint64{20997, 20998})
assert.Equal(t, false, newState.rebuildTrie[types.Balances])

// We would trigger above the threshold.
newState.addDirtyIndices(types.Balances, []uint64{10000, 10001, 10002, 10003})
newState.addDirtyIndices(types.Balances, []uint64{21000, 21001, 21002, 21003})
assert.Equal(t, true, newState.rebuildTrie[types.Balances])
}
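
The comment's point is that updating a Merkle trie index-by-index costs roughly O(a log n) for a dirty leaves, so past some a a full O(n) rebuild wins; the change moves that crossover estimate from 8000 to 20000. A toy illustration of the crossover, not Prysm's exact bookkeeping:

package main

import "fmt"

// rebuildIsCheaper compares rough hash counts: a dirty leaves cost about
// a*log2(n) node rehashes incrementally, while a full rebuild touches
// about 2n nodes.
func rebuildIsCheaper(dirty, leaves float64) bool {
	logN := 0.0
	for n := leaves; n > 1; n /= 2 {
		logN++
	}
	return dirty*logN > 2*leaves
}

func main() {
	fmt.Println(rebuildIsCheaper(8000, 1<<20))   // false: keep incremental updates
	fmt.Println(rebuildIsCheaper(200000, 1<<20)) // true: rebuild the whole trie
}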

@@ -38,6 +38,7 @@ go_library(
"subscriber_blob_sidecar.go",
"subscriber_bls_to_execution_change.go",
"subscriber_handlers.go",
"subscriber_light_client.go",
"subscriber_sync_committee_message.go",
"subscriber_sync_contribution_proof.go",
"subscription_topic_handler.go",

@@ -47,6 +48,7 @@ go_library(
"validate_beacon_blocks.go",
"validate_blob.go",
"validate_bls_to_execution_change.go",
"validate_light_client.go",
"validate_proposer_slashing.go",
"validate_sync_committee_message.go",
"validate_sync_contribution_proof.go",

@@ -71,6 +73,7 @@ go_library(
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",

@@ -101,6 +104,7 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/leaky-bucket:go_default_library",

@@ -182,6 +186,7 @@ go_test(
"validate_beacon_blocks_test.go",
"validate_blob_test.go",
"validate_bls_to_execution_change_test.go",
"validate_light_client_test.go",
"validate_proposer_slashing_test.go",
"validate_sync_committee_message_test.go",
"validate_sync_contribution_proof_test.go",

@@ -198,6 +203,7 @@ go_test(
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",

@@ -89,6 +89,10 @@ func extractValidDataTypeFromTopic(topic string, digest []byte, clock *startup.C
return extractDataTypeFromTypeMap(types.AggregateAttestationMap, digest, clock)
case p2p.AttesterSlashingSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.AttesterSlashingMap, digest, clock)
case p2p.LightClientOptimisticUpdateTopicFormat:
return extractDataTypeFromTypeMap(types.LightClientOptimisticUpdateMap, digest, clock)
case p2p.LightClientFinalityUpdateTopicFormat:
return extractDataTypeFromTypeMap(types.LightClientFinalityUpdateMap, digest, clock)
}
return nil, nil
}

@@ -6,6 +6,7 @@ import (
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"

@@ -196,3 +197,11 @@ func WithSlasherEnabled(enabled bool) Option {
return nil
}
}

// WithLightClientStore allows the sync package to access light client data.
func WithLightClientStore(lcs *lightClient.Store) Option {
return func(s *Service) error {
s.lcStore = lcs
return nil
}
}
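
WithLightClientStore follows the sync service's functional-option pattern: configuration is a list of func(*Service) error values applied at construction. A minimal standalone sketch of the pattern with stand-in types:

package main

import "fmt"

type store struct{}

type service struct{ lcStore *store }

type option func(*service) error

// withLightClientStore mirrors the option added above.
func withLightClientStore(lcs *store) option {
	return func(s *service) error {
		s.lcStore = lcs
		return nil
	}
}

func newService(opts ...option) (*service, error) {
	s := &service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := newService(withLightClientStore(&store{}))
	if err != nil {
		panic(err)
	}
	fmt.Println(s.lcStore != nil) // true
}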

@@ -9,6 +9,7 @@ import (
"sync"
"time"

lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
lru "github.com/hashicorp/golang-lru"
pubsub "github.com/libp2p/go-libp2p-pubsub"
libp2pcore "github.com/libp2p/go-libp2p/core"

@@ -165,6 +166,7 @@ type Service struct {
availableBlocker coverage.AvailableBlocker
ctxMap ContextByteVersions
slasherEnabled bool
lcStore *lightClient.Store
}

// NewService initializes new regular sync service.

@@ -133,6 +133,20 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
s.activeSyncSubnetIndices,
func(currentSlot primitives.Slot) []uint64 { return []uint64{} },
)
if features.Get().EnableLightClient {
s.subscribe(
p2p.LightClientOptimisticUpdateTopicFormat,
s.validateLightClientOptimisticUpdate,
s.lightClientOptimisticUpdateSubscriber,
digest,
)
s.subscribe(
p2p.LightClientFinalityUpdateTopicFormat,
s.validateLightClientFinalityUpdate,
s.lightClientFinalityUpdateSubscriber,
digest,
)
}
}

// New gossip topic in Capella

66 beacon-chain/sync/subscriber_light_client.go (new file)
@@ -0,0 +1,66 @@
package sync

import (
"context"
"fmt"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
lightclientTypes "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
"github.com/sirupsen/logrus"
"google.golang.org/protobuf/proto"
)

func (s *Service) lightClientOptimisticUpdateSubscriber(_ context.Context, msg proto.Message) error {
update, err := lightclientTypes.NewWrappedOptimisticUpdate(msg)
if err != nil {
return err
}

attestedHeaderRoot, err := update.AttestedHeader().Beacon().HashTreeRoot()
if err != nil {
return err
}

log.WithFields(logrus.Fields{
"attestedSlot": fmt.Sprintf("%d", update.AttestedHeader().Beacon().Slot),
"signatureSlot": fmt.Sprintf("%d", update.SignatureSlot()),
"attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
}).Debug("Saving newly received light client optimistic update.")

s.lcStore.SetLastOptimisticUpdate(update)

s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.LightClientOptimisticUpdate,
Data: update,
})

return nil
}

func (s *Service) lightClientFinalityUpdateSubscriber(_ context.Context, msg proto.Message) error {
update, err := lightclientTypes.NewWrappedFinalityUpdate(msg)
if err != nil {
return err
}

attestedHeaderRoot, err := update.AttestedHeader().Beacon().HashTreeRoot()
if err != nil {
return err
}

log.WithFields(logrus.Fields{
"attestedSlot": fmt.Sprintf("%d", update.AttestedHeader().Beacon().Slot),
"signatureSlot": fmt.Sprintf("%d", update.SignatureSlot()),
"attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
}).Debug("Saving newly received light client finality update.")

s.lcStore.SetLastFinalityUpdate(update)

s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.LightClientFinalityUpdate,
Data: update,
})

return nil
}

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v6/async/abool"
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
db "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"

@@ -27,6 +28,7 @@ import (
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"

@@ -638,3 +640,137 @@ func createPeer(t *testing.T, topics ...string) *p2ptest.TestP2P {
}
return p
}

func TestSubscribe_ReceivesLCOptimisticUpdate(t *testing.T) {
origNC := params.BeaconConfig()
// restore network config after test completes
defer func() {
params.OverrideBeaconConfig(origNC)
}()

params.SetupTestConfigCleanup(t)
p2pService := p2ptest.NewTestP2P(t)
ctx := context.Background()
cfg := params.BeaconConfig().Copy()
cfg.AltairForkEpoch = 1
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
params.OverrideBeaconConfig(cfg)

secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
slotIntervals := int(params.BeaconConfig().IntervalsPerSlot)
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)

genesisDrift := slotsPerEpoch*secondsPerSlot + 2*secondsPerSlot + secondsPerSlot/slotIntervals
chainService := &mockChain.ChainService{
ValidatorsRoot: [32]byte{'A'},
Genesis: time.Unix(time.Now().Unix()-int64(genesisDrift), 0),
}
d := db.SetupDB(t)
r := Service{
ctx: ctx,
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
beaconDB: d,
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
stateNotifier: &mockChain.MockStateNotifier{},
},
chainStarted: abool.New(),
lcStore: &lightClient.Store{},
subHandler: newSubTopicHandler(),
}
topic := p2p.LightClientOptimisticUpdateTopicFormat
var wg sync.WaitGroup
wg.Add(1)

var err error
p2pService.Digest, err = r.currentForkDigest()
require.NoError(t, err)
r.subscribe(topic, r.validateLightClientOptimisticUpdate, func(ctx context.Context, msg proto.Message) error {
require.NoError(t, r.lightClientOptimisticUpdateSubscriber(ctx, msg))
wg.Done()
return nil
}, p2pService.Digest)

r.markForChainStart()

l := util.NewTestLightClient(t, version.Altair, util.WithSupermajority())
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err, "Error generating light client optimistic update")

p2pService.ReceivePubSub(topic, update.Proto())

if util.WaitTimeout(&wg, time.Second) {
t.Fatal("Did not receive PubSub in 1 second")
}
u := r.lcStore.LastOptimisticUpdate()
assert.DeepEqual(t, update.Proto(), u.Proto())
}

func TestSubscribe_ReceivesLCFinalityUpdate(t *testing.T) {
origNC := params.BeaconConfig()
// restore network config after test completes
defer func() {
params.OverrideBeaconConfig(origNC)
}()

params.SetupTestConfigCleanup(t)
p2pService := p2ptest.NewTestP2P(t)
ctx := context.Background()
cfg := params.BeaconConfig().Copy()
cfg.AltairForkEpoch = 1
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
params.OverrideBeaconConfig(cfg)

secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
slotIntervals := int(params.BeaconConfig().IntervalsPerSlot)
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)

genesisDrift := slotsPerEpoch*secondsPerSlot + 2*secondsPerSlot + secondsPerSlot/slotIntervals
chainService := &mockChain.ChainService{
ValidatorsRoot: [32]byte{'A'},
Genesis: time.Unix(time.Now().Unix()-int64(genesisDrift), 0),
}
d := db.SetupDB(t)
r := Service{
ctx: ctx,
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
beaconDB: d,
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
stateNotifier: &mockChain.MockStateNotifier{},
},
chainStarted: abool.New(),
lcStore: &lightClient.Store{},
subHandler: newSubTopicHandler(),
}
topic := p2p.LightClientFinalityUpdateTopicFormat
var wg sync.WaitGroup
wg.Add(1)

var err error
p2pService.Digest, err = r.currentForkDigest()
require.NoError(t, err)
r.subscribe(topic, r.validateLightClientFinalityUpdate, func(ctx context.Context, msg proto.Message) error {
require.NoError(t, r.lightClientFinalityUpdateSubscriber(ctx, msg))
wg.Done()
return nil
}, p2pService.Digest)

r.markForChainStart()

l := util.NewTestLightClient(t, version.Altair, util.WithSupermajority())
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err, "Error generating light client finality update")

p2pService.ReceivePubSub(topic, update.Proto())

if util.WaitTimeout(&wg, time.Second) {
t.Fatal("Did not receive PubSub in 1 second")
}
u := r.lcStore.LastFinalityUpdate()
assert.DeepEqual(t, update.Proto(), u.Proto())
}
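
The validators in the new file below both enforce the spec's [IGNORE] timing rule: an update only becomes forwardable once one-third of its signature slot has elapsed, less the allowed gossip clock disparity. A standalone sketch of that computation, with mainnet-style constants hard-coded for illustration rather than read from config:

package main

import (
	"fmt"
	"time"
)

// earliestValidTime mirrors the guard below: slot start, plus
// SECONDS_PER_SLOT / INTERVALS_PER_SLOT, minus MAXIMUM_GOSSIP_CLOCK_DISPARITY.
func earliestValidTime(genesis time.Time, signatureSlot uint64) time.Time {
	const secondsPerSlot = 12
	const intervalsPerSlot = 3
	const maxClockDisparity = 500 * time.Millisecond
	slotStart := genesis.Add(time.Duration(signatureSlot*secondsPerSlot) * time.Second)
	return slotStart.Add(secondsPerSlot / intervalsPerSlot * time.Second).Add(-maxClockDisparity)
}

func main() {
	genesis := time.Now().Add(-30 * time.Second)
	t := earliestValidTime(genesis, 2) // 4s into slot 2, less 500ms disparity
	fmt.Println("forwardable:", !time.Now().Before(t))
}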

160 beacon-chain/sync/validate_light_client.go (new file)
@@ -0,0 +1,160 @@
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func (s *Service) validateLightClientOptimisticUpdate(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
// Validation runs on publish (not just subscriptions), so we should approve any message from
|
||||
// ourselves.
|
||||
if pid == s.cfg.p2p.PeerID() {
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
// Ignore updates while syncing
|
||||
if s.cfg.initialSync.Syncing() {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
_, span := trace.StartSpan(ctx, "sync.validateLightClientOptimisticUpdate")
|
||||
defer span.End()
|
||||
|
||||
m, err := s.decodePubsubMessage(msg)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
newUpdate, ok := m.(interfaces.LightClientOptimisticUpdate)
|
||||
if !ok {
|
||||
return pubsub.ValidationReject, errWrongMessage
|
||||
}
|
||||
|
||||
attestedHeaderRoot, err := newUpdate.AttestedHeader().Beacon().HashTreeRoot()
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// [IGNORE] The optimistic_update is received after the block at signature_slot was given enough time
|
||||
// to propagate through the network -- i.e. validate that one-third of optimistic_update.signature_slot
|
||||
// has transpired (SECONDS_PER_SLOT / INTERVALS_PER_SLOT seconds after the start of the slot,
|
||||
// with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance)
|
||||
earliestValidTime := slots.StartTime(uint64(s.cfg.clock.GenesisTime().Unix()), newUpdate.SignatureSlot()).
|
||||
Add(time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot/params.BeaconConfig().IntervalsPerSlot)).
|
||||
Add(-params.BeaconConfig().MaximumGossipClockDisparityDuration())
|
||||
if s.cfg.clock.Now().Before(earliestValidTime) {
|
||||
log.Debug("Newly received light client optimistic update ignored. not enough time passed for block to propagate")
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
lastStoredUpdate := s.lcStore.LastOptimisticUpdate()
|
||||
if lastStoredUpdate != nil {
|
||||
lastUpdateSlot := lastStoredUpdate.AttestedHeader().Beacon().Slot
|
||||
newUpdateSlot := newUpdate.AttestedHeader().Beacon().Slot
|
||||
|
||||
// [IGNORE] The attested_header.beacon.slot is greater than that of all previously forwarded optimistic_updates
|
||||
if newUpdateSlot <= lastUpdateSlot {
|
||||
log.Debug("Newly received light client optimistic update ignored. new update is older than stored update")
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"attestedSlot": fmt.Sprintf("%d", newUpdate.AttestedHeader().Beacon().Slot),
|
||||
"signatureSlot": fmt.Sprintf("%d", newUpdate.SignatureSlot()),
|
||||
"attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
|
||||
}).Debug("New gossiped light client optimistic update validated.")
|
||||
|
||||
msg.ValidatorData = newUpdate.Proto()
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
func (s *Service) validateLightClientFinalityUpdate(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
// Validation runs on publish (not just subscriptions), so we should approve any message from
|
||||
// ourselves.
|
||||
if pid == s.cfg.p2p.PeerID() {
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
// Ignore updates while syncing
|
||||
if s.cfg.initialSync.Syncing() {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
_, span := trace.StartSpan(ctx, "sync.validateLightClientFinalityUpdate")
|
||||
defer span.End()
|
||||
|
||||
m, err := s.decodePubsubMessage(msg)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
newUpdate, ok := m.(interfaces.LightClientFinalityUpdate)
|
||||
if !ok {
|
||||
return pubsub.ValidationReject, errWrongMessage
|
||||
}
|
||||
|
||||
attestedHeaderRoot, err := newUpdate.AttestedHeader().Beacon().HashTreeRoot()
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
	// [IGNORE] The finality_update is received after the block at signature_slot was given enough time
	// to propagate through the network -- i.e. validate that one-third of finality_update.signature_slot
	// has transpired (SECONDS_PER_SLOT / INTERVALS_PER_SLOT seconds after the start of the slot,
	// with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance)
	earliestValidTime := slots.StartTime(uint64(s.cfg.clock.GenesisTime().Unix()), newUpdate.SignatureSlot()).
		Add(time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot/params.BeaconConfig().IntervalsPerSlot)).
		Add(-params.BeaconConfig().MaximumGossipClockDisparityDuration())
	if s.cfg.clock.Now().Before(earliestValidTime) {
		log.Debug("Newly received light client finality update ignored. not enough time passed for block to propagate")
		return pubsub.ValidationIgnore, nil
	}

	lastStoredUpdate := s.lcStore.LastFinalityUpdate()
	if lastStoredUpdate != nil {
		lastUpdateSlot := lastStoredUpdate.FinalizedHeader().Beacon().Slot
		newUpdateSlot := newUpdate.FinalizedHeader().Beacon().Slot

		// [IGNORE] The finalized_header.beacon.slot is greater than that of all previously forwarded finality_updates,
		// or it matches the highest previously forwarded slot and also has a sync_aggregate indicating supermajority (> 2/3)
		// sync committee participation while the previously forwarded finality_update for that slot did not indicate supermajority
		lastUpdateHasSupermajority := lightClient.UpdateHasSupermajority(lastStoredUpdate.SyncAggregate())
		newUpdateHasSupermajority := lightClient.UpdateHasSupermajority(newUpdate.SyncAggregate())

		if newUpdateSlot < lastUpdateSlot {
			log.Debug("Newly received light client finality update ignored. new update is older than stored update")
			return pubsub.ValidationIgnore, nil
		}
		if newUpdateSlot == lastUpdateSlot && (lastUpdateHasSupermajority || !newUpdateHasSupermajority) {
			log.WithFields(logrus.Fields{
				"attestedSlot":       fmt.Sprintf("%d", newUpdate.AttestedHeader().Beacon().Slot),
				"signatureSlot":      fmt.Sprintf("%d", newUpdate.SignatureSlot()),
				"attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
			}).Debug("Newly received light client finality update ignored. no supermajority advantage.")
			return pubsub.ValidationIgnore, nil
		}
	}

	log.WithFields(logrus.Fields{
		"attestedSlot":       fmt.Sprintf("%d", newUpdate.AttestedHeader().Beacon().Slot),
		"signatureSlot":      fmt.Sprintf("%d", newUpdate.SignatureSlot()),
		"attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
	}).Debug("New gossiped light client finality update validated.")

	msg.ValidatorData = newUpdate.Proto()
	return pubsub.ValidationAccept, nil
}
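For orientation, the following is a minimal standalone sketch of the one-third-of-slot [IGNORE] timing rule both validators apply. The concrete numbers are illustrative assumptions, not taken from this diff: mainnet-style SECONDS_PER_SLOT = 12, INTERVALS_PER_SLOT = 3, MAXIMUM_GOSSIP_CLOCK_DISPARITY = 500 ms, and the mainnet genesis timestamp with an arbitrary signature slot.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed mainnet-style constants (illustrative only).
	const secondsPerSlot = 12
	const intervalsPerSlot = 3
	clockDisparity := 500 * time.Millisecond

	genesis := time.Unix(1606824023, 0) // mainnet genesis, used here as an example
	signatureSlot := uint64(100)        // arbitrary slot for the worked example

	slotStart := genesis.Add(time.Duration(signatureSlot) * secondsPerSlot * time.Second)
	// One-third of the slot (12 / 3 = 4 s) must elapse, minus the clock-disparity allowance.
	earliestValidTime := slotStart.Add(secondsPerSlot / intervalsPerSlot * time.Second).Add(-clockDisparity)
	fmt.Println(earliestValidTime) // updates received before this instant are ignored
}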
284
beacon-chain/sync/validate_light_client_test.go
Normal file
@@ -0,0 +1,284 @@
package sync

import (
	"bytes"
	"context"
	"testing"
	"time"

	mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	mockSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync/testing"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	pb "github.com/libp2p/go-libp2p-pubsub/pb"
)

func TestValidateLightClientOptimisticUpdate_NilMessageOrTopic(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	ctx := context.Background()
	p := p2ptest.NewTestP2P(t)
	s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}}

	_, err := s.validateLightClientOptimisticUpdate(ctx, "", nil)
	require.ErrorIs(t, err, errNilPubsubMessage)

	_, err = s.validateLightClientOptimisticUpdate(ctx, "", &pubsub.Message{Message: &pb.Message{}})
	require.ErrorIs(t, err, errNilPubsubMessage)

	emptyTopic := ""
	_, err = s.validateLightClientOptimisticUpdate(ctx, "", &pubsub.Message{Message: &pb.Message{
		Topic: &emptyTopic,
	}})
	require.ErrorIs(t, err, errNilPubsubMessage)
}

func TestValidateLightClientOptimisticUpdate(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.AltairForkEpoch = 1
	cfg.BellatrixForkEpoch = 2
	cfg.CapellaForkEpoch = 3
	cfg.DenebForkEpoch = 4
	cfg.ElectraForkEpoch = 5
	cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
	cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
	cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
	cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
	cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
	params.OverrideBeaconConfig(cfg)

	secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
	slotIntervals := int(params.BeaconConfig().IntervalsPerSlot)
	slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)

	tests := []struct {
		name             string
		genesisDrift     int
		oldUpdateOptions []util.LightClientOption
		newUpdateOptions []util.LightClientOption
		expectedResult   pubsub.ValidationResult
		expectedErr      error
	}{
		{
			name:             "no previous update",
			oldUpdateOptions: nil,
			newUpdateOptions: []util.LightClientOption{},
			expectedResult:   pubsub.ValidationAccept,
		},
		{
			name:             "not enough time passed",
			genesisDrift:     -secondsPerSlot / slotIntervals,
			oldUpdateOptions: nil,
			newUpdateOptions: []util.LightClientOption{},
			expectedResult:   pubsub.ValidationIgnore,
		},
		{
			name:             "new update has no age advantage",
			oldUpdateOptions: []util.LightClientOption{},
			newUpdateOptions: []util.LightClientOption{},
			expectedResult:   pubsub.ValidationIgnore,
		},
		{
			name:             "new update is better - younger",
			genesisDrift:     secondsPerSlot,
			oldUpdateOptions: []util.LightClientOption{},
			newUpdateOptions: []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
			expectedResult:   pubsub.ValidationAccept,
		},
	}

	for _, test := range tests {
		for v := 1; v < 6; v++ {
			t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
				ctx := context.Background()
				p := p2ptest.NewTestP2P(t)
				// drift back appropriate number of epochs based on fork + 2 slots for signature slot + time for gossip propagation + any extra drift
				genesisDrift := v*slotsPerEpoch*secondsPerSlot + 2*secondsPerSlot + secondsPerSlot/slotIntervals + test.genesisDrift
				chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(genesisDrift), 0)}
				s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}, lcStore: &lightClient.Store{}}

				var oldUpdate interfaces.LightClientOptimisticUpdate
				var err error
				if test.oldUpdateOptions != nil {
					l := util.NewTestLightClient(t, v, test.oldUpdateOptions...)
					oldUpdate, err = lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
					require.NoError(t, err)

					s.lcStore.SetLastOptimisticUpdate(oldUpdate)
				}

				l := util.NewTestLightClient(t, v, test.newUpdateOptions...)
				newUpdate, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
				require.NoError(t, err)
				buf := new(bytes.Buffer)
				_, err = p.Encoding().EncodeGossip(buf, newUpdate)
				require.NoError(t, err)

				topic := p2p.LightClientOptimisticUpdateTopicFormat
				digest, err := s.currentForkDigest()
				require.NoError(t, err)
				topic = s.addDigestToTopic(topic, digest)

				r, err := s.validateLightClientOptimisticUpdate(ctx, "", &pubsub.Message{
					Message: &pb.Message{
						Data:  buf.Bytes(),
						Topic: &topic,
					}})
				if test.expectedErr != nil {
					require.ErrorIs(t, err, test.expectedErr)
				} else {
					require.NoError(t, err)
					require.Equal(t, test.expectedResult, r)
				}
			})
		}
	}
}

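For a concrete sense of the genesis drift computed above: assuming mainnet-style values (SecondsPerSlot = 12, SlotsPerEpoch = 32, IntervalsPerSlot = 3) and the Altair case v = 1 with no extra per-test drift, genesis is placed 1*32*12 + 2*12 + 12/3 = 384 + 24 + 4 = 412 seconds in the past, i.e. one full epoch plus the two-slot signature-slot allowance plus the one-third-of-slot propagation window, so the generated update lands just inside the validator's timing window.
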
func TestValidateLightClientFinalityUpdate_NilMessageOrTopic(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	ctx := context.Background()
	p := p2ptest.NewTestP2P(t)
	s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}}

	_, err := s.validateLightClientFinalityUpdate(ctx, "", nil)
	require.ErrorIs(t, err, errNilPubsubMessage)

	_, err = s.validateLightClientFinalityUpdate(ctx, "", &pubsub.Message{Message: &pb.Message{}})
	require.ErrorIs(t, err, errNilPubsubMessage)

	emptyTopic := ""
	_, err = s.validateLightClientFinalityUpdate(ctx, "", &pubsub.Message{Message: &pb.Message{
		Topic: &emptyTopic,
	}})
	require.ErrorIs(t, err, errNilPubsubMessage)
}

func TestValidateLightClientFinalityUpdate(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.AltairForkEpoch = 1
	cfg.BellatrixForkEpoch = 2
	cfg.CapellaForkEpoch = 3
	cfg.DenebForkEpoch = 4
	cfg.ElectraForkEpoch = 5
	cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
	cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
	cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
	cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
	cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
	params.OverrideBeaconConfig(cfg)

	secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
	slotIntervals := int(params.BeaconConfig().IntervalsPerSlot)
	slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)

	tests := []struct {
		name             string
		genesisDrift     int
		oldUpdateOptions []util.LightClientOption
		newUpdateOptions []util.LightClientOption
		expectedResult   pubsub.ValidationResult
		expectedErr      error
	}{
		{
			name:             "no previous update",
			oldUpdateOptions: nil,
			newUpdateOptions: []util.LightClientOption{},
			expectedResult:   pubsub.ValidationAccept,
		},
		{
			name:             "not enough time passed",
			genesisDrift:     -secondsPerSlot / slotIntervals,
			oldUpdateOptions: nil,
			newUpdateOptions: []util.LightClientOption{},
			expectedResult:   pubsub.ValidationIgnore,
		},
		{
			name:             "new update has no advantage",
			oldUpdateOptions: []util.LightClientOption{},
			newUpdateOptions: []util.LightClientOption{},
			expectedResult:   pubsub.ValidationIgnore,
		},
		{
			name:             "new update is better - age",
			genesisDrift:     secondsPerSlot,
			oldUpdateOptions: []util.LightClientOption{},
			newUpdateOptions: []util.LightClientOption{util.WithIncreasedFinalizedSlot(1)},
			expectedResult:   pubsub.ValidationAccept,
		},
		{
			name:             "new update is better - supermajority",
			oldUpdateOptions: []util.LightClientOption{},
			newUpdateOptions: []util.LightClientOption{util.WithSupermajority()},
			expectedResult:   pubsub.ValidationAccept,
		},
		{
			name:             "old update is better - supermajority",
			oldUpdateOptions: []util.LightClientOption{util.WithSupermajority()},
			newUpdateOptions: []util.LightClientOption{},
			expectedResult:   pubsub.ValidationIgnore,
		},
		{
			name:             "old update is better - age",
			oldUpdateOptions: []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
			newUpdateOptions: []util.LightClientOption{},
			expectedResult:   pubsub.ValidationIgnore,
		},
	}

	for _, test := range tests {
		for v := 1; v < 6; v++ {
			t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
				ctx := context.Background()
				p := p2ptest.NewTestP2P(t)
				// drift back appropriate number of epochs based on fork + 2 slots for signature slot + time for gossip propagation + any extra drift
				genesisDrift := v*slotsPerEpoch*secondsPerSlot + 2*secondsPerSlot + secondsPerSlot/slotIntervals + test.genesisDrift
				chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(genesisDrift), 0)}
				s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}, lcStore: &lightClient.Store{}}

				var oldUpdate interfaces.LightClientFinalityUpdate
				var err error
				if test.oldUpdateOptions != nil {
					l := util.NewTestLightClient(t, v, test.oldUpdateOptions...)
					oldUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
					require.NoError(t, err)

					s.lcStore.SetLastFinalityUpdate(oldUpdate)
				}

				l := util.NewTestLightClient(t, v, test.newUpdateOptions...)
				newUpdate, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
				require.NoError(t, err)
				buf := new(bytes.Buffer)
				_, err = p.Encoding().EncodeGossip(buf, newUpdate)
				require.NoError(t, err)

				topic := p2p.LightClientFinalityUpdateTopicFormat
				digest, err := s.currentForkDigest()
				require.NoError(t, err)
				topic = s.addDigestToTopic(topic, digest)

				r, err := s.validateLightClientFinalityUpdate(ctx, "", &pubsub.Message{
					Message: &pb.Message{
						Data:  buf.Bytes(),
						Topic: &topic,
					}})
				if test.expectedErr != nil {
					require.ErrorIs(t, err, test.expectedErr)
				} else {
					require.NoError(t, err)
					require.Equal(t, test.expectedResult, r)
				}
			})
		}
	}
}
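The finality tie-break exercised by the supermajority test cases leans on lightClient.UpdateHasSupermajority, whose body is not part of this diff. Below is a hypothetical sketch (not the prysm implementation) of the rule the validator's spec comment describes; the raw []byte bitvector layout and the committee size of 512 are assumptions for illustration.

package main

import "fmt"

// hasSupermajority is a hypothetical sketch -- not the prysm implementation.
// It encodes the "> 2/3 sync committee participation" rule from the validator's
// spec comment. The []byte participation-bitvector layout and the committee
// size of 512 (mainnet SYNC_COMMITTEE_SIZE) are assumptions.
func hasSupermajority(participationBits []byte, committeeSize int) bool {
	set := 0
	for _, b := range participationBits {
		for b != 0 {
			set += int(b & 1) // count each set participation bit
			b >>= 1
		}
	}
	return set*3 > committeeSize*2 // strictly greater than two-thirds
}

func main() {
	full := make([]byte, 64) // 512 bits, all set
	for i := range full {
		full[i] = 0xFF
	}
	fmt.Println(hasSupermajority(full, 512)) // true: 512 of 512 > 2/3
}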
@@ -0,0 +1,3 @@
### Added

- Add light client p2p validator and subscriber functions.
3
changelog/bastin_enable-light-client-gossip.md
Normal file
@@ -0,0 +1,3 @@
### Added

- Enable light client gossip for optimistic and finality updates.
3
changelog/come-maiz_upgrade_ristretto.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Upgraded ristretto to v2.2.0, for RISC-V support.
@@ -0,0 +1,7 @@
### Fixed

- Fixed gocognit on propose block rest path.

### Ignored

- Removed jsonify functions that duplicate FromConsensus functions in the structs package for rest calls on propose block.
3
changelog/james-prysm_electra-withdrawal-cleanup.md
Normal file
@@ -0,0 +1,3 @@
### Ignored

- Added suggested cleanups that break out some internal functions from ExpectedWithdrawals.
3
changelog/james-prysm_fix-ppw-index-alias.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Fixed a wrong field name returned for pending partial withdrawals in the state JSON representation, as described in https://github.com/ethereum/consensus-specs/blob/dev/specs/electra/beacon-chain.md#pendingpartialwithdrawal
2
changelog/kasey_relax-event-deadline.md
Normal file
@@ -0,0 +1,2 @@
### Fixed
- Extended the payload attribute computation deadline to the beginning of the proposal slot.
2
changelog/manu-peerdas-core.md
Normal file
@@ -0,0 +1,2 @@
### Added
- Implement PeerDAS core functions.
3
changelog/manu-peerdas-proto.md
Normal file
@@ -0,0 +1,3 @@
### Added

- PeerDAS: Add needed proto files and corresponding generated code.
3
changelog/nisdas_increase_limit.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Increase indices limit in field trie rebuilding.
3
changelog/potuz_force_duties_update.md
Normal file
@@ -0,0 +1,3 @@
### Added

- Force duties start on received blocks.
2
changelog/potuz_historical_roots.md
Normal file
@@ -0,0 +1,2 @@
### Ignored
- HistoricalRoots should not return an error.
3
changelog/potuz_nil_checkroot.md
Normal file
@@ -0,0 +1,3 @@
### Ignored

- Check for uninitialized duties on `checkDependentRoot`
3
changelog/potuz_pass_roots_in_block_events.md
Normal file
@@ -0,0 +1,3 @@
### Ignored

- Add dependent roots in block events.
3
changelog/potuz_remove_slot_update_duties.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Removed the slot from `UpdateDuties`.
Some files were not shown because too many files have changed in this diff.