Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 05:47:59 -05:00

Compare commits: ctx-check...fulu-bpo-r (202 commits)
(Commit listing: 202 commits, captured only as bare SHA1 hashes from aee1349fbf through c688c84393; the Author, Date, and commit message columns are not present in this mirror view.)
CHANGELOG.md (43 lines changed)
@@ -4,6 +4,49 @@ All notable changes to this project will be documented in this file.
The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [v6.0.4](https://github.com/prysmaticlabs/prysm/compare/v6.0.3...v6.0.4) - 2025-06-05

This release has more work on PeerDAS and light client support. Additionally, we have a few bug fixes:

- Blob cache size now correctly set at startup.
- A fix for slashing protection history exports where the validator database was in a nested folder.
- Corrected behavior of the API call for state committees with an invalid request.
- `/bin/sh` is now symlinked to `/bin/bash` for Prysm docker images.

In the [Hoodi](https://github.com/eth-clients/hoodi) testnet, the default gas limit is raised to 60M gas.

### Added

- Add light client mainnet spec test. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15295)
- Add support for light client req/resp domain. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15281)
- Added /bin/sh symlink to docker images. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15294)
- Added Prysm build data to otel tracing spans. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15302)
- Add light client minimal spec test support for `update_ranking` tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15297)
- Add fulu operation and epoch processing spec tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15284)
- Updated e2e Beacon API evaluator to support more endpoints, including the ones introduced in Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15304)
- Data column sidecars verification methods. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15232)
- Implement data column sidecars filesystem. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15257)
- Add blob schedule support from https://github.com/ethereum/consensus-specs/pull/4277. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15272)
- Random forkchoice spec tests for Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15287)
- Add ability to download nightly test vectors. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15312)
- PeerDAS: Validation pipeline for data column sidecars received via gossip. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15310)
- PeerDAS: Implement P2P. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15347)
- PeerDAS: Implement the blockchain package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15350)

### Changed

- Update spec tests to v1.6.0-alpha.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15306)
- PeerDAS: Refactor the reconstruction pipeline. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15309)
- PeerDAS: `DataColumnStorage.Get` - Exit early if no columns are available. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15309)
- Default Hoodi testnet builder gas limit to 60M. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15361)

### Fixed

- Fix cyclical dependencies issue when using the testing/util package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15248)
- Set seen blob cache size correctly based on current slot time at start up. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15348)
- Fix `slashing-protection-history export` failing when `validator.db` is in a nested folder like `data/direct/` (#14954). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15351)
- Made `/eth/v1/beacon/states/{state_id}/committees` endpoint return `400` when the slot does not belong to the specified epoch, aligning with the Beacon API spec (#15355). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15356)
- Removed eager validator context cancellation that was causing validator builder registrations to fail occasionally. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15369)

## [v6.0.3](https://github.com/prysmaticlabs/prysm/compare/v6.0.2...v6.0.3) - 2025-05-21

This release has important bugfixes for users of the [Beacon API](https://ethereum.github.io/beacon-APIs/). These fixes include:
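One of the v6.0.4 fixes listed above, returning `400` from the committees endpoint when the supplied slot does not fall inside the supplied epoch, boils down to a small consistency check. The sketch below is illustrative only; `slotsPerEpoch`, `toEpoch`, and `validateCommitteeQuery` are stand-in names, not Prysm's actual handler code.

```go
package main

import (
	"fmt"
	"net/http"
)

const slotsPerEpoch = 32 // stand-in for SLOTS_PER_EPOCH on mainnet

func toEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

// validateCommitteeQuery mirrors the fix: when both an epoch and a slot are supplied,
// the slot must fall inside that epoch, otherwise the request is rejected with 400.
func validateCommitteeQuery(epoch, slot uint64) (int, error) {
	if toEpoch(slot) != epoch {
		return http.StatusBadRequest, fmt.Errorf("slot %d is not in epoch %d", slot, epoch)
	}
	return http.StatusOK, nil
}

func main() {
	fmt.Println(validateCommitteeQuery(2, 95)) // 200 <nil> (epoch 2 covers slots 64..95)
	fmt.Println(validateCommitteeQuery(2, 96)) // 400 slot 96 is not in epoch 2
}
```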
WORKSPACE (14 lines changed)
@@ -1,7 +1,7 @@
workspace(name = "prysm")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "rules_pkg",

@@ -16,8 +16,6 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")

rules_pkg_dependencies()

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "toolchains_protoc",
    sha256 = "abb1540f8a9e045422730670ebb2f25b41fa56ca5a7cf795175a110a0a68f4ad",

@@ -255,16 +253,16 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.6.0-alpha.0"
consensus_spec_version = "v1.6.0-alpha.1"

load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

consensus_spec_tests(
    name = "consensus_spec_tests",
    flavors = {
        "general": "sha256-W7oKvoM0nAkyitykRxAw6kmCvjYC01IqiNJy0AmCnMM=",
        "minimal": "sha256-ig7/zxomjv6buBWMom4IxAJh3lFJ9+JnY44E7c8ZNP8=",
        "mainnet": "sha256-mjx+MkXtPhCNv4c4knLYLIkvIdpF7WTjx/ElvGPQzSo=",
        "general": "sha256-o4t9p3R+fQHF4KOykGmwlG3zDw5wUdVWprkzId8aIsk=",
        "minimal": "sha256-sU7ToI8t3MR8x0vVjC8ERmAHZDWpEmnAC9FWIpHi5x4=",
        "mainnet": "sha256-YKS4wngg0LgI9Upp4MYJ77aG+8+e/G4YeqEIlp06LZw=",
    },
    version = consensus_spec_version,
)

@@ -280,7 +278,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-u0RkIZIeGttb3sInR31mO64aBSwxALqO5SYIPlqEvPo=",
    integrity = "sha256-Nv4TEuEJPQIM4E6T9J0FOITsmappmXZjGtlhe1HEXnU=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -16,7 +16,6 @@ go_library(
|
||||
"//api/server/structs:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api/client"
|
||||
@@ -17,7 +16,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
@@ -137,24 +135,6 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
|
||||
return fr.ToConsensus()
|
||||
}
|
||||
|
||||
// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
|
||||
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
|
||||
body, err := c.Get(ctx, getForkSchedulePath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error requesting fork schedule")
|
||||
}
|
||||
fsr := &forkScheduleResponse{}
|
||||
err = json.Unmarshal(body, fsr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ofs, err := fsr.OrderedForkSchedule()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
|
||||
}
|
||||
return ofs, nil
|
||||
}
|
||||
|
||||
// GetConfigSpec retrieve the current configs of the network used by the beacon node.
|
||||
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
|
||||
body, err := c.Get(ctx, getConfigSpecPath)
|
||||
@@ -334,31 +314,3 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
|
||||
}
|
||||
return poolResponse, nil
|
||||
}
|
||||
|
||||
type forkScheduleResponse struct {
|
||||
Data []structs.Fork
|
||||
}
|
||||
|
||||
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
|
||||
ofs := make(forks.OrderedSchedule, 0)
|
||||
for _, d := range fsr.Data {
|
||||
epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
|
||||
}
|
||||
vSlice, err := hexutil.Decode(d.CurrentVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(vSlice) != 4 {
|
||||
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
|
||||
}
|
||||
version := bytesutil.ToBytes4(vSlice)
|
||||
ofs = append(ofs, forks.ForkScheduleEntry{
|
||||
Version: version,
|
||||
Epoch: primitives.Epoch(epoch),
|
||||
})
|
||||
}
|
||||
sort.Sort(ofs)
|
||||
return ofs, nil
|
||||
}
|
||||
|
||||
@@ -923,7 +923,14 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
srcLookahead, err := st.ProposerLookahead()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lookahead := make([]string, len(srcLookahead))
|
||||
for i, v := range srcLookahead {
|
||||
lookahead[i] = fmt.Sprintf("%d", uint64(v))
|
||||
}
|
||||
return &BeaconStateFulu{
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
|
||||
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
|
||||
@@ -962,5 +969,6 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
|
||||
PendingDeposits: PendingDepositsFromConsensus(pbd),
|
||||
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
|
||||
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
|
||||
ProposerLookahead: lookahead,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -33,8 +33,14 @@ type GetPeerResponse struct {
	Data *Peer `json:"data"`
}

// Added Meta to align with beacon-api: https://ethereum.github.io/beacon-APIs/#/Node/getPeers
type Meta struct {
	Count int `json:"count"`
}

type GetPeersResponse struct {
	Data []*Peer `json:"data"`
	Meta Meta `json:"meta"`
}

type Peer struct {

@@ -219,4 +219,5 @@ type BeaconStateFulu struct {
	PendingDeposits []*PendingDeposit `json:"pending_deposits"`
	PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
	PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
	ProposerLookahead []string `json:"proposer_lookahead"`
}
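The new `proposer_lookahead` field surfaces the Fulu proposer lookahead as decimal strings in the Beacon API structs, matching the conversion loop in `BeaconStateFuluFromConsensus` above. The following self-contained sketch uses illustrative types and values, not the real structs package, to show the resulting JSON shape.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// beaconStateFuluSketch is a trimmed, illustrative stand-in for the BeaconStateFulu struct
// above; only the new field is shown.
type beaconStateFuluSketch struct {
	ProposerLookahead []string `json:"proposer_lookahead"`
}

func main() {
	// Hypothetical proposer indices for the upcoming slots of the lookahead window.
	src := []uint64{1021, 77, 399, 5}

	// Same conversion as BeaconStateFuluFromConsensus: indices become decimal strings.
	lookahead := make([]string, len(src))
	for i, v := range src {
		lookahead[i] = fmt.Sprintf("%d", v)
	}

	out, _ := json.Marshal(beaconStateFuluSketch{ProposerLookahead: lookahead})
	fmt.Println(string(out)) // {"proposer_lookahead":["1021","77","399","5"]}
}
```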
@@ -25,6 +25,7 @@ go_library(
|
||||
"receive_attestation.go",
|
||||
"receive_blob.go",
|
||||
"receive_block.go",
|
||||
"receive_data_column.go",
|
||||
"service.go",
|
||||
"setup_forchoice.go",
|
||||
"tracked_proposer.go",
|
||||
@@ -50,6 +51,7 @@ go_library(
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -146,6 +148,7 @@ go_test(
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
|
||||
@@ -40,10 +40,12 @@ var (
|
||||
errNotGenesisRoot = errors.New("root is not the genesis block root")
|
||||
// errBlacklistedBlock is returned when a block is blacklisted as invalid.
|
||||
errBlacklistedRoot = verification.AsVerificationFailure(errors.New("block root is blacklisted"))
|
||||
// errMaxBlobsExceeded is returned when the number of blobs in a block exceeds the maximum allowed.
|
||||
errMaxBlobsExceeded = verification.AsVerificationFailure(errors.New("expected commitments in block exceeds MAX_BLOBS_PER_BLOCK"))
|
||||
// errMaxDataColumnsExceeded is returned when the number of data columns exceeds the maximum allowed.
|
||||
errMaxDataColumnsExceeded = verification.AsVerificationFailure(errors.New("expected data columns for node exceeds NUMBER_OF_COLUMNS"))
|
||||
)
|
||||
|
||||
var errMaxBlobsExceeded = verification.AsVerificationFailure(errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK"))
|
||||
|
||||
// An invalid block is the block that fails state transition based on the core protocol rules.
|
||||
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
|
||||
// Some examples of invalid blocks are:
|
||||
|
||||
@@ -439,6 +439,9 @@ func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32
|
||||
// Blobs may not exist for some blocks, leading to deletion failures. Log such errors at debug level.
|
||||
log.WithError(err).Debug("Could not remove blob from blob storage")
|
||||
}
|
||||
if err := s.dataColumnStorage.Remove(root); err != nil {
|
||||
log.WithError(err).Errorf("Could not remove data columns from data column storage for root %#x", root)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
|
||||
@@ -127,9 +130,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
|
||||
}
|
||||
|
||||
// WithP2PBroadcaster to broadcast messages after appropriate processing.
|
||||
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
|
||||
func WithP2PBroadcaster(p p2p.Accessor) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.P2p = p
|
||||
s.cfg.P2P = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -208,6 +211,15 @@ func WithBlobStorage(b *filesystem.BlobStorage) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithDataColumnStorage sets the data column storage backend for the blockchain service.
|
||||
func WithDataColumnStorage(b *filesystem.DataColumnStorage) Option {
|
||||
return func(s *Service) error {
|
||||
s.dataColumnStorage = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSyncChecker sets the sync checker for the blockchain service.
|
||||
func WithSyncChecker(checker Checker) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.SyncChecker = checker
|
||||
@@ -215,6 +227,15 @@ func WithSyncChecker(checker Checker) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithCustodyInfo sets the custody info for the blockchain service.
|
||||
func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.CustodyInfo = custodyInfo
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSlasherEnabled sets whether the slasher is enabled or not.
|
||||
func WithSlasherEnabled(enabled bool) Option {
|
||||
return func(s *Service) error {
|
||||
s.slasherEnabled = enabled
|
||||
@@ -222,6 +243,15 @@ func WithSlasherEnabled(enabled bool) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithGenesisTime sets the genesis time for the blockchain service.
|
||||
func WithGenesisTime(genesisTime time.Time) Option {
|
||||
return func(s *Service) error {
|
||||
s.genesisTime = genesisTime
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithLightClientStore sets the light client store for the blockchain service.
|
||||
func WithLightClientStore(lcs *lightclient.Store) Option {
|
||||
return func(s *Service) error {
|
||||
s.lcStore = lcs
|
||||
|
||||
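The new functional options added here (`WithDataColumnStorage`, `WithCustodyInfo`, `WithGenesisTime`) compose with the existing ones when the blockchain service is constructed. A minimal wiring sketch follows; it assumes the package's existing `NewService(ctx, opts ...Option)` constructor and omits the many other options a real node sets.

```go
package node

import (
	"context"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
)

// newChainService is an illustrative wiring of the options added in this change alongside
// the pre-existing blob storage option; a real node passes many more options.
func newChainService(
	ctx context.Context,
	blobs *filesystem.BlobStorage,
	columns *filesystem.DataColumnStorage,
	genesis time.Time,
) (*blockchain.Service, error) {
	return blockchain.NewService(ctx,
		blockchain.WithBlobStorage(blobs),
		blockchain.WithDataColumnStorage(columns),          // new: data column sidecar storage backend
		blockchain.WithCustodyInfo(&peerdas.CustodyInfo{}), // new: shared PeerDAS custody info
		blockchain.WithGenesisTime(genesis),                // new: genesis time for slot timing
	)
}
```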
@@ -5,6 +5,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
@@ -88,7 +89,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := attestation.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
|
||||
if err := attestation.IsValidAttestationIndices(ctx, indexedAtt, params.BeaconConfig().MaxValidatorsPerCommittee, params.BeaconConfig().MaxCommitteesPerSlot); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -3,10 +3,12 @@ package blockchain
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
@@ -239,8 +241,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
|
||||
return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
|
||||
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
|
||||
}
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
|
||||
JustifiedCheckpoint: jCheckpoints[i],
|
||||
@@ -578,12 +581,12 @@ func (s *Service) runLateBlockTasks() {
|
||||
}
|
||||
}
|
||||
|
||||
// missingIndices uses the expected commitments from the block to determine
|
||||
// missingBlobIndices uses the expected commitments from the block to determine
|
||||
// which BlobSidecar indices would need to be in the database for DA success.
|
||||
// It returns a map where each key represents a missing BlobSidecar index.
|
||||
// An empty map means we have all indices; a non-empty map can be used to compare incoming
|
||||
// BlobSidecars against the set of known missing sidecars.
|
||||
func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte, slot primitives.Slot) (map[uint64]struct{}, error) {
|
||||
func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if len(expected) == 0 {
|
||||
return nil, nil
|
||||
@@ -592,29 +595,238 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
|
||||
return nil, errMaxBlobsExceeded
|
||||
}
|
||||
indices := bs.Summary(root)
|
||||
missing := make(map[uint64]struct{}, len(expected))
|
||||
missing := make(map[uint64]bool, len(expected))
|
||||
for i := range expected {
|
||||
if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
|
||||
missing[uint64(i)] = struct{}{}
|
||||
missing[uint64(i)] = true
|
||||
}
|
||||
}
|
||||
return missing, nil
|
||||
}
|
||||
|
||||
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
// The function will first check the database to see if all sidecars have been persisted. If any
|
||||
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
|
||||
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
|
||||
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if signed.Version() < version.Deneb {
|
||||
return nil
|
||||
// missingDataColumnIndices uses the expected data columns from the block to determine
|
||||
// which DataColumnSidecar indices would need to be in the database for DA success.
|
||||
// It returns a map where each key represents a missing DataColumnSidecar index.
|
||||
// An empty map means we have all indices; a non-empty map can be used to compare incoming
|
||||
// DataColumns against the set of known missing sidecars.
|
||||
func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
|
||||
if len(expected) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
block := signed.Block()
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
if uint64(len(expected)) > numberOfColumns {
|
||||
return nil, errMaxDataColumnsExceeded
|
||||
}
|
||||
|
||||
// Get a summary of the data columns stored in the database.
|
||||
summary := bs.Summary(root)
|
||||
|
||||
// Check all expected data columns against the summary.
|
||||
missing := make(map[uint64]bool)
|
||||
for column := range expected {
|
||||
if !summary.HasIndex(column) {
|
||||
missing[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
return missing, nil
|
||||
}
|
||||
|
||||
// isDataAvailable blocks until all sidecars committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
// The function will first check the database to see if all sidecars have been persisted. If any
|
||||
// sidecars are missing, it will then read from the sidecar notifier channel for the given root until the channel is
|
||||
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
|
||||
func (s *Service) isDataAvailable(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
signedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
startWaiting ...chan<- bool, // For test purposes only
|
||||
) error {
|
||||
block := signedBlock.Block()
|
||||
if block == nil {
|
||||
return errors.New("invalid nil beacon block")
|
||||
}
|
||||
|
||||
blockVersion := block.Version()
|
||||
if blockVersion >= version.Fulu {
|
||||
return s.areDataColumnsAvailable(ctx, root, block, startWaiting...)
|
||||
}
|
||||
|
||||
if blockVersion >= version.Deneb {
|
||||
return s.areBlobsAvailable(ctx, root, block)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
func (s *Service) areDataColumnsAvailable(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
block interfaces.ReadOnlyBeaconBlock,
|
||||
startWaiting ...chan<- bool, // For test purposes only
|
||||
) error {
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
|
||||
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
|
||||
|
||||
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
return nil
|
||||
}
|
||||
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return errors.New("invalid nil beacon block body")
|
||||
}
|
||||
|
||||
kzgCommitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// If the block has no commitments, there is nothing to wait for.
|
||||
if len(kzgCommitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// All columns to sample need to be available for the block to be considered available.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
|
||||
nodeID := s.cfg.P2P.NodeID()
|
||||
|
||||
// Prevent the custody group count from changing during the rest of the function.
|
||||
s.cfg.CustodyInfo.Mut.RLock()
|
||||
defer s.cfg.CustodyInfo.Mut.RUnlock()
|
||||
|
||||
// Get the custody group sampling size for the node.
|
||||
custodyGroupSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
// Subscribe to data columns newly stored in the database.
|
||||
subscription, identsChan := s.dataColumnStorage.Subscribe()
|
||||
defer subscription.Unsubscribe()
|
||||
|
||||
// Get the count of data columns we already have in the store.
|
||||
summary := s.dataColumnStorage.Summary(root)
|
||||
storedDataColumnsCount := summary.Count()
|
||||
|
||||
minimumColumnCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
|
||||
|
||||
// As soon as we have enough data column sidecars, we can reconstruct the missing ones.
|
||||
// We don't need to wait for the rest of the data columns to declare the block as available.
|
||||
if storedDataColumnsCount >= minimumColumnCountToReconstruct {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get a map of data column indices that are not currently available.
|
||||
missingMap, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "missing data columns")
|
||||
}
|
||||
|
||||
// If there are no missing indices, all data column sidecars are available.
|
||||
// This is the happy path.
|
||||
if len(missingMap) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Notify the caller that we are waiting for data columns.
|
||||
if len(startWaiting) > 0 && startWaiting[0] != nil {
|
||||
startWaiting[0] <- true
|
||||
}
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
|
||||
|
||||
// Avoid logging if DA check is called after next slot start.
|
||||
if nextSlot.After(time.Now()) {
|
||||
timer := time.AfterFunc(time.Until(nextSlot), func() {
|
||||
missingMapCount := uint64(len(missingMap))
|
||||
|
||||
if missingMapCount == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
expected interface{} = "all"
|
||||
missing interface{} = "all"
|
||||
)
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
colMapCount := uint64(len(peerInfo.CustodyColumns))
|
||||
|
||||
if colMapCount < numberOfColumns {
|
||||
expected = uint64MapToSortedSlice(peerInfo.CustodyColumns)
|
||||
}
|
||||
|
||||
if missingMapCount < numberOfColumns {
|
||||
missing = uint64MapToSortedSlice(missingMap)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"columnsExpected": expected,
|
||||
"columnsWaiting": missing,
|
||||
}).Warning("Data columns still missing at slot end")
|
||||
})
|
||||
defer timer.Stop()
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case idents := <-identsChan:
|
||||
if idents.Root != root {
|
||||
// This is not the root we are looking for.
|
||||
continue
|
||||
}
|
||||
|
||||
for _, index := range idents.Indices {
|
||||
// This is a data column we are expecting.
|
||||
if _, ok := missingMap[index]; ok {
|
||||
storedDataColumnsCount++
|
||||
}
|
||||
|
||||
// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
|
||||
// We don't need to wait for the rest of the data columns to declare the block as available.
|
||||
if storedDataColumnsCount >= minimumColumnCountToReconstruct {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove the index from the missing map.
|
||||
delete(missingMap, index)
|
||||
|
||||
// Return if there are no more missing data columns.
|
||||
if len(missingMap) == 0 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
var missingIndices interface{} = "all"
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
missingIndicesCount := uint64(len(missingMap))
|
||||
|
||||
if missingIndicesCount < numberOfColumns {
|
||||
missingIndices = uint64MapToSortedSlice(missingMap)
|
||||
}
|
||||
|
||||
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
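The early exit in `areDataColumnsAvailable` relies on the PeerDAS erasure-coding property that roughly half of the `NUMBER_OF_COLUMNS` column sidecars are enough to rebuild the rest, which is what `peerdas.MinimumColumnsCountToReconstruct` encodes. The standalone sketch below illustrates that threshold with the mainnet column count of 128; the constant and helper names are stand-ins, not the real package API, and it assumes the usual 50% reconstruction threshold.

```go
package main

import "fmt"

// numberOfColumns is the mainnet NUMBER_OF_COLUMNS value; in the diff it comes from
// params.BeaconConfig().NumberOfColumns.
const numberOfColumns = 128

// minimumColumnsToReconstruct is a stand-in for peerdas.MinimumColumnsCountToReconstruct,
// assuming any half of the extended columns suffices to rebuild the other half.
func minimumColumnsToReconstruct() uint64 {
	return (numberOfColumns + 1) / 2 // 64 for 128 columns
}

// availableEarly mirrors the shortcut in areDataColumnsAvailable: once the stored column
// count reaches the reconstruction threshold, the block can be declared available without
// waiting for the remaining sidecars.
func availableEarly(storedColumns uint64) bool {
	return storedColumns >= minimumColumnsToReconstruct()
}

func main() {
	fmt.Println(availableEarly(63)) // false: one column short of the threshold
	fmt.Println(availableEarly(64)) // true: the node can reconstruct the other 64 columns
}
```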
// areBlobsAvailable blocks until all BlobSidecars committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
|
||||
blockSlot := block.Slot()
|
||||
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
|
||||
return nil
|
||||
@@ -634,9 +846,9 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
return nil
|
||||
}
|
||||
// get a map of BlobSidecar indices that are not currently available.
|
||||
missing, err := missingIndices(s.blobStorage, root, kzgCommitments, block.Slot())
|
||||
missing, err := missingBlobIndices(s.blobStorage, root, kzgCommitments, block.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "missing indices")
|
||||
}
|
||||
// If there are no missing indices, all BlobSidecars are available.
|
||||
if len(missing) == 0 {
|
||||
@@ -648,15 +860,20 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
nc := s.blobNotifiers.forRoot(root, block.Slot())
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
|
||||
nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
|
||||
// Avoid logging if DA check is called after next slot start.
|
||||
if nextSlot.After(time.Now()) {
|
||||
nst := time.AfterFunc(time.Until(nextSlot), func() {
|
||||
if len(missing) == 0 {
|
||||
return
|
||||
}
|
||||
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
|
||||
Error("Still waiting for DA check at slot end.")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": blockSlot,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"blobsExpected": expected,
|
||||
"blobsWaiting": len(missing),
|
||||
}).Error("Still waiting for blobs DA check at slot end.")
|
||||
})
|
||||
defer nst.Stop()
|
||||
}
|
||||
@@ -678,13 +895,14 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
}
|
||||
}
|
||||
|
||||
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"slot": slot,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"blobsExpected": expected,
|
||||
"blobsWaiting": missing,
|
||||
// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
|
||||
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
|
||||
output := make([]uint64, 0, len(input))
|
||||
for idx := range input {
|
||||
output = append(output, idx)
|
||||
}
|
||||
slices.Sort[[]uint64](output)
|
||||
return output
|
||||
}
|
||||
|
||||
// lateBlockTasks is called 4 seconds into the slot and performs tasks
|
||||
@@ -770,7 +988,7 @@ func (s *Service) waitForSync() error {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
|
||||
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [fieldparams.RootLength]byte) error {
|
||||
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
|
||||
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
|
||||
}
|
||||
|
||||
@@ -310,7 +310,7 @@ func (s *Service) processLightClientFinalityUpdate(
|
||||
Data: newUpdate,
|
||||
})
|
||||
|
||||
if err = s.cfg.P2p.BroadcastLightClientFinalityUpdate(ctx, newUpdate); err != nil {
|
||||
if err = s.cfg.P2P.BroadcastLightClientFinalityUpdate(ctx, newUpdate); err != nil {
|
||||
return errors.Wrap(err, "could not broadcast light client finality update")
|
||||
}
|
||||
|
||||
@@ -363,7 +363,7 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
|
||||
Data: newUpdate,
|
||||
})
|
||||
|
||||
if err = s.cfg.P2p.BroadcastLightClientOptimisticUpdate(ctx, newUpdate); err != nil {
|
||||
if err = s.cfg.P2P.BroadcastLightClientOptimisticUpdate(ctx, newUpdate); err != nil {
|
||||
return errors.Wrap(err, "could not broadcast light client optimistic update")
|
||||
}
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
@@ -2331,13 +2332,13 @@ func driftGenesisTime(s *Service, slot, delay int64) {
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(newTime.Unix()))
|
||||
}
|
||||
|
||||
func TestMissingIndices(t *testing.T) {
|
||||
func TestMissingBlobIndices(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
expected [][]byte
|
||||
present []uint64
|
||||
result map[uint64]struct{}
|
||||
root [32]byte
|
||||
root [fieldparams.RootLength]byte
|
||||
err error
|
||||
}{
|
||||
{
|
||||
@@ -2395,7 +2396,7 @@ func TestMissingIndices(t *testing.T) {
|
||||
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
require.NoError(t, bm.CreateFakeIndices(c.root, 0, c.present...))
|
||||
missing, err := missingIndices(bs, c.root, c.expected, 0)
|
||||
missing, err := missingBlobIndices(bs, c.root, c.expected, 0)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
return
|
||||
@@ -2403,9 +2404,70 @@ func TestMissingIndices(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(c.result), len(missing))
|
||||
for key := range c.result {
|
||||
m, ok := missing[key]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, c.result[key], m)
|
||||
require.Equal(t, true, missing[key])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMissingDataColumnIndices(t *testing.T) {
|
||||
countPlusOne := params.BeaconConfig().NumberOfColumns + 1
|
||||
tooManyColumns := make(map[uint64]bool, countPlusOne)
|
||||
for i := range countPlusOne {
|
||||
tooManyColumns[uint64(i)] = true
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
storedIndices []uint64
|
||||
input map[uint64]bool
|
||||
expected map[uint64]bool
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "zero len expected",
|
||||
input: map[uint64]bool{},
|
||||
},
|
||||
{
|
||||
name: "expected exceeds max",
|
||||
input: tooManyColumns,
|
||||
err: errMaxDataColumnsExceeded,
|
||||
},
|
||||
{
|
||||
name: "all missing",
|
||||
storedIndices: []uint64{},
|
||||
input: map[uint64]bool{0: true, 1: true, 2: true},
|
||||
expected: map[uint64]bool{0: true, 1: true, 2: true},
|
||||
},
|
||||
{
|
||||
name: "none missing",
|
||||
input: map[uint64]bool{0: true, 1: true, 2: true},
|
||||
expected: map[uint64]bool{},
|
||||
storedIndices: []uint64{0, 1, 2, 3, 4}, // Extra columns stored but not expected
|
||||
},
|
||||
{
|
||||
name: "some missing",
|
||||
storedIndices: []uint64{0, 20},
|
||||
input: map[uint64]bool{0: true, 10: true, 20: true, 30: true},
|
||||
expected: map[uint64]bool{10: true, 30: true},
|
||||
},
|
||||
}
|
||||
|
||||
var emptyRoot [fieldparams.RootLength]byte
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
dcm, dcs := filesystem.NewEphemeralDataColumnStorageWithMocker(t)
|
||||
err := dcm.CreateFakeIndices(emptyRoot, 0, tc.storedIndices...)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test the function
|
||||
actual, err := missingDataColumnIndices(dcs, emptyRoot, tc.input)
|
||||
require.ErrorIs(t, err, tc.err)
|
||||
|
||||
require.Equal(t, len(tc.expected), len(actual))
|
||||
for key := range tc.expected {
|
||||
require.Equal(t, true, actual[key])
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -3246,6 +3308,233 @@ func TestSaveLightClientBootstrap(t *testing.T) {
|
||||
reset()
|
||||
}
|
||||
|
||||
type testIsAvailableParams struct {
|
||||
options []Option
|
||||
blobKzgCommitmentsCount uint64
|
||||
columnsToSave []uint64
|
||||
}
|
||||
|
||||
func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
options := append(params.options, WithDataColumnStorage(dataColumnStorage))
|
||||
service, _ := minimalTestService(t, options...)
|
||||
|
||||
genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32 /*validator count*/)
|
||||
|
||||
err := service.saveGenesisData(ctx, genesisState)
|
||||
require.NoError(t, err)
|
||||
|
||||
conf := util.DefaultBlockGenConfig()
|
||||
conf.NumBlobKzgCommitments = params.blobKzgCommitmentsCount
|
||||
|
||||
signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, 10 /*block slot*/)
|
||||
require.NoError(t, err)
|
||||
|
||||
block := signedBeaconBlock.Block
|
||||
bodyRoot, err := block.Body.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
root, err := block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
dataColumnsParams := make([]util.DataColumnParam, 0, len(params.columnsToSave))
|
||||
for _, i := range params.columnsToSave {
|
||||
dataColumnParam := util.DataColumnParam{
|
||||
Index: i,
|
||||
Slot: block.Slot,
|
||||
ProposerIndex: block.ProposerIndex,
|
||||
ParentRoot: block.ParentRoot,
|
||||
StateRoot: block.StateRoot,
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
dataColumnsParams = append(dataColumnsParams, dataColumnParam)
|
||||
}
|
||||
|
||||
_, verifiedRODataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)
|
||||
|
||||
err = dataColumnStorage.Save(verifiedRODataColumns)
|
||||
require.NoError(t, err)
|
||||
|
||||
signed, err := consensusblocks.NewSignedBeaconBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
return ctx, cancel, service, root, signed
|
||||
}
|
||||
|
||||
func TestIsDataAvailable(t *testing.T) {
|
||||
t.Run("Fulu - out of retention window", func(t *testing.T) {
|
||||
params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - no commitment in blocks", func(t *testing.T) {
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testIsAvailableParams{})
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
|
||||
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
|
||||
for i := range minimumColumnsCountToReconstruct {
|
||||
indices = append(indices, i)
|
||||
}
|
||||
|
||||
params := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - no missing data columns", func(t *testing.T) {
|
||||
params := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
|
||||
columnsToSave: []uint64{1, 17, 19, 42, 75, 87, 102, 117, 119}, // 119 is not needed
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - some initially missing data columns (no reconstruction)", func(t *testing.T) {
|
||||
testParams := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
|
||||
columnsToSave: []uint64{1, 17, 19, 75, 102, 117, 119}, // 119 is not needed, 42 and 87 are missing
|
||||
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
|
||||
block := signed.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
parentRoot := block.ParentRoot()
|
||||
stateRoot := block.StateRoot()
|
||||
bodyRoot, err := block.Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
_, verifiedSidecarsWrongRoot := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
[]util.DataColumnParam{
|
||||
{Index: 42, Slot: slot + 1}, // Needed index, but not for this slot.
|
||||
})
|
||||
|
||||
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||
{Index: 87, Slot: slot, ProposerIndex: proposerIndex, ParentRoot: parentRoot[:], StateRoot: stateRoot[:], BodyRoot: bodyRoot[:]}, // Needed index
|
||||
{Index: 1, Slot: slot, ProposerIndex: proposerIndex, ParentRoot: parentRoot[:], StateRoot: stateRoot[:], BodyRoot: bodyRoot[:]}, // Not needed index
|
||||
{Index: 42, Slot: slot, ProposerIndex: proposerIndex, ParentRoot: parentRoot[:], StateRoot: stateRoot[:], BodyRoot: bodyRoot[:]}, // Needed index
|
||||
})
|
||||
|
||||
startWaiting := make(chan bool)
|
||||
|
||||
go func() {
|
||||
<-startWaiting
|
||||
|
||||
err := service.dataColumnStorage.Save(verifiedSidecarsWrongRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.dataColumnStorage.Save(verifiedSidecars)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = service.isDataAvailable(ctx, root, signed, startWaiting)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - some initially missing data columns (reconstruction)", func(t *testing.T) {
|
||||
const (
|
||||
missingColumns = uint64(2)
|
||||
cgc = 128
|
||||
)
|
||||
var custodyInfo peerdas.CustodyInfo
|
||||
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(cgc)
|
||||
custodyInfo.ToAdvertiseGroupCount.Set(cgc)
|
||||
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
|
||||
indices := make([]uint64, 0, minimumColumnsCountToReconstruct-missingColumns)
|
||||
|
||||
for i := range minimumColumnsCountToReconstruct - missingColumns {
|
||||
indices = append(indices, i)
|
||||
}
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&custodyInfo)},
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
|
||||
block := signed.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
parentRoot := block.ParentRoot()
|
||||
stateRoot := block.StateRoot()
|
||||
bodyRoot, err := block.Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
dataColumnParams := make([]util.DataColumnParam, 0, missingColumns)
|
||||
for i := minimumColumnsCountToReconstruct - missingColumns; i < minimumColumnsCountToReconstruct; i++ {
|
||||
dataColumnParam := util.DataColumnParam{
|
||||
Index: i,
|
||||
Slot: slot,
|
||||
ProposerIndex: proposerIndex,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: stateRoot[:],
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
|
||||
dataColumnParams = append(dataColumnParams, dataColumnParam)
|
||||
}
|
||||
|
||||
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParams)
|
||||
|
||||
startWaiting := make(chan bool)
|
||||
|
||||
go func() {
|
||||
<-startWaiting
|
||||
|
||||
err := service.dataColumnStorage.Save(verifiedSidecars)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = service.isDataAvailable(ctx, root, signed, startWaiting)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - some columns are definitively missing", func(t *testing.T) {
|
||||
params := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, cancel, service, root, signed := testIsAvailableSetup(t, params)
|
||||
|
||||
startWaiting := make(chan bool)
|
||||
go func() {
|
||||
<-startWaiting
|
||||
cancel()
|
||||
}()
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed, startWaiting)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func setupLightClientTestRequirements(ctx context.Context, t *testing.T, s *Service, v int, options ...util.LightClientOption) (*util.TestLightClient, *postBlockProcessConfig) {
|
||||
var l *util.TestLightClient
|
||||
switch v {
|
||||
@@ -3310,7 +3599,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
|
||||
params.OverrideBeaconConfig(beaconCfg)
|
||||
|
||||
s, tr := minimalTestService(t)
|
||||
s.cfg.P2p = &mockp2p.FakeP2P{}
|
||||
s.cfg.P2P = &mockp2p.FakeP2P{}
|
||||
ctx := tr.ctx
|
||||
|
||||
testCases := []struct {
|
||||
@@ -3446,7 +3735,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
|
||||
params.OverrideBeaconConfig(beaconCfg)
|
||||
|
||||
s, tr := minimalTestService(t)
|
||||
s.cfg.P2p = &mockp2p.FakeP2P{}
|
||||
s.cfg.P2P = &mockp2p.FakeP2P{}
|
||||
ctx := tr.ctx
|
||||
|
||||
testCases := []struct {
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/slasher/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -52,6 +53,13 @@ type BlobReceiver interface {
|
||||
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
|
||||
}
|
||||
|
||||
// DataColumnReceiver interface defines the methods of the chain service for receiving new
// data columns.
|
||||
type DataColumnReceiver interface {
|
||||
ReceiveDataColumn(blocks.VerifiedRODataColumn) error
|
||||
ReceiveDataColumns([]blocks.VerifiedRODataColumn) error
|
||||
}
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
type SlashingReceiver interface {
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
|
||||
@@ -74,6 +82,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
|
||||
return nil
|
||||
}
|
||||
|
||||
receivedTime := time.Now()
|
||||
s.blockBeingSynced.set(blockRoot)
|
||||
defer s.blockBeingSynced.unset(blockRoot)
|
||||
@@ -82,6 +91,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get block's prestate")
|
||||
@@ -97,10 +107,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Defragment the state before continuing block processing.
|
||||
s.defragmentState(postState)
|
||||
|
||||
@@ -227,26 +239,34 @@ func (s *Service) validateExecutionAndConsensus(
|
||||
func (s *Service) handleDA(
|
||||
ctx context.Context,
|
||||
block interfaces.SignedBeaconBlock,
|
||||
blockRoot [32]byte,
|
||||
blockRoot [fieldparams.RootLength]byte,
|
||||
avs das.AvailabilityStore,
|
||||
) (time.Duration, error) {
|
||||
daStartTime := time.Now()
|
||||
if avs != nil {
|
||||
rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
) (elapsed time.Duration, err error) {
|
||||
defer func(start time.Time) {
|
||||
elapsed = time.Since(start)
|
||||
|
||||
if err == nil {
|
||||
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
|
||||
}
|
||||
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
|
||||
return 0, errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
|
||||
}
|
||||
} else {
|
||||
if err := s.isDataAvailable(ctx, blockRoot, block); err != nil {
|
||||
return 0, errors.Wrap(err, "could not validate blob data availability")
|
||||
}(time.Now())
|
||||
|
||||
if avs == nil {
|
||||
if err = s.isDataAvailable(ctx, blockRoot, block); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
daWaitedTime := time.Since(daStartTime)
|
||||
dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
|
||||
return daWaitedTime, nil
|
||||
|
||||
var rob blocks.ROBlock
|
||||
rob, err = blocks.NewROBlockWithRoot(block, blockRoot)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = avs.IsDataAvailable(ctx, s.CurrentSlot(), rob)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Service) reportPostBlockProcessing(
|
||||
|
||||
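The reworked `handleDA` above uses named return values with a deferred closure so that the DA wait time is measured on every exit path and the histogram is only observed when no error occurred. Here is a generic, self-contained sketch of that pattern, with a stand-in `observe` function in place of the Prometheus metric used in the diff.

```go
package main

import (
	"fmt"
	"time"
)

// observe stands in for the dataAvailWaitedTime histogram used in the diff.
func observe(ms float64) { fmt.Printf("da_wait_ms=%f\n", ms) }

func doWork() error { time.Sleep(10 * time.Millisecond); return nil }

// handleDASketch shows the named-return pattern: the deferred closure runs after the
// return values are set, records the elapsed time, and only reports the metric on success.
func handleDASketch() (elapsed time.Duration, err error) {
	defer func(start time.Time) {
		elapsed = time.Since(start)
		if err == nil {
			observe(float64(elapsed.Milliseconds()))
		}
	}(time.Now())

	err = doWork()
	return
}

func main() {
	elapsed, err := handleDASketch()
	fmt.Println(elapsed, err)
}
```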
@@ -180,6 +180,19 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
func TestHandleDA(t *testing.T) {
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
s, _ := minimalTestService(t)
|
||||
elapsed, err := s.handleDA(context.Background(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, elapsed > 0, "Elapsed time should be greater than 0")
|
||||
}
|
||||
|
||||
func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
s, tr := minimalTestService(t,
|
||||
|
||||
beacon-chain/blockchain/receive_data_column.go (new file, 25 lines)

@@ -0,0 +1,25 @@
package blockchain

import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/pkg/errors"
)

// ReceiveDataColumns receives a batch of data columns.
func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
	if err := s.dataColumnStorage.Save(dataColumnSidecars); err != nil {
		return errors.Wrap(err, "save data column sidecars")
	}

	return nil
}

// ReceiveDataColumn receives a single data column.
// (It is only a wrapper around ReceiveDataColumns.)
func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
	if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
		return errors.Wrap(err, "save data column sidecars")
	}

	return nil
}
@@ -16,6 +16,7 @@ import (
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
@@ -30,6 +31,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
@@ -64,6 +66,7 @@ type Service struct {
|
||||
blobNotifiers *blobNotifierMap
|
||||
blockBeingSynced *currentlySyncingBlock
|
||||
blobStorage *filesystem.BlobStorage
|
||||
dataColumnStorage *filesystem.DataColumnStorage
|
||||
slasherEnabled bool
|
||||
lcStore *lightClient.Store
|
||||
}
|
||||
@@ -81,7 +84,7 @@ type config struct {
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingPool slashings.PoolManager
|
||||
BLSToExecPool blstoexec.PoolManager
|
||||
P2p p2p.Broadcaster
|
||||
P2P p2p.Accessor
|
||||
MaxRoutines int
|
||||
StateNotifier statefeed.Notifier
|
||||
ForkChoiceStore f.ForkChoicer
|
||||
@@ -93,6 +96,7 @@ type config struct {
|
||||
FinalizedStateAtStartUp state.BeaconState
|
||||
ExecutionEngineCaller execution.EngineCaller
|
||||
SyncChecker Checker
|
||||
CustodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// Checker is an interface used to determine if a node is in initial sync
|
||||
@@ -106,22 +110,26 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
|
||||
type blobNotifierMap struct {
|
||||
sync.RWMutex
|
||||
notifiers map[[32]byte]chan uint64
|
||||
seenIndex map[[32]byte][]bool
|
||||
// TODO: Separate blobs from data columns
|
||||
// seenIndex map[[32]byte][]bool
|
||||
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
|
||||
}
|
||||
|
||||
// notifyIndex notifies a blob by its index for a given root.
|
||||
// It uses internal maps to keep track of seen indices and notifier channels.
|
||||
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if idx >= uint64(maxBlobsPerBlock) {
|
||||
return
|
||||
}
|
||||
// TODO: Separate blobs from data columns
|
||||
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
// if idx >= uint64(maxBlobsPerBlock) {
|
||||
// return
|
||||
// }
|
||||
|
||||
bn.Lock()
|
||||
seen := bn.seenIndex[root]
|
||||
if seen == nil {
|
||||
seen = make([]bool, maxBlobsPerBlock)
|
||||
}
|
||||
// TODO: Separate blobs from data columns
|
||||
// if seen == nil {
|
||||
// seen = make([]bool, maxBlobsPerBlock)
|
||||
// }
|
||||
if seen[idx] {
|
||||
bn.Unlock()
|
||||
return
|
||||
@@ -132,7 +140,9 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
|
||||
// Retrieve or create the notifier channel for the given root.
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, maxBlobsPerBlock)
|
||||
// TODO: Separate blobs from data columns
|
||||
// c = make(chan uint64, maxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
|
||||
@@ -142,12 +152,15 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
|
||||
}
|
||||
|
||||
func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
// TODO: Separate blobs from data columns
|
||||
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
bn.Lock()
|
||||
defer bn.Unlock()
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, maxBlobsPerBlock)
|
||||
// TODO: Separate blobs from data columns
|
||||
// c = make(chan uint64, maxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
return c
|
||||
@@ -173,7 +186,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
bn := &blobNotifierMap{
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
seenIndex: make(map[[32]byte][]bool),
|
||||
// TODO: Separate blobs from data columns
|
||||
// seenIndex: make(map[[32]byte][]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
}
|
||||
srv := &Service{
|
||||
ctx: ctx,
|
||||
|
||||
@@ -97,7 +97,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithSlashingPool(slashings.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithP2PBroadcaster(&mockBroadcaster{}),
|
||||
WithP2PBroadcaster(&mockAccessor{}),
|
||||
WithStateNotifier(&mockBeaconNode{}),
|
||||
WithForkChoiceStore(fc),
|
||||
WithAttestationService(attService),
|
||||
@@ -587,7 +587,9 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
|
||||
func TestNotifyIndex(t *testing.T) {
|
||||
// Initialize a blobNotifierMap
|
||||
bn := &blobNotifierMap{
|
||||
seenIndex: make(map[[32]byte][]bool),
|
||||
// TODO: Separate blobs from data columns
|
||||
// seenIndex: make(map[[32]byte][]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
}
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ type mockBroadcaster struct {
|
||||
broadcastCalled bool
|
||||
}
|
||||
|
||||
type mockAccesser struct {
|
||||
type mockAccessor struct {
|
||||
mockBroadcaster
|
||||
p2pTesting.MockPeerManager
|
||||
}
|
||||
@@ -144,9 +144,10 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
|
||||
WithDepositCache(dc),
|
||||
WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
|
||||
WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
|
||||
WithSyncChecker(mock.MockChecker{}),
|
||||
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
|
||||
WithP2PBroadcaster(&mockAccesser{}),
|
||||
WithP2PBroadcaster(&mockAccessor{}),
|
||||
WithLightClientStore(&lightclient.Store{}),
|
||||
}
|
||||
// append the variadic opts so they override the defaults by being processed afterwards
|
||||
|
||||
@@ -75,6 +75,7 @@ type ChainService struct {
|
||||
BlockSlot primitives.Slot
|
||||
SyncingRoot [32]byte
|
||||
Blobs []blocks.VerifiedROBlob
|
||||
DataColumns []blocks.VerifiedRODataColumn
|
||||
TargetRoot [32]byte
|
||||
MockHeadSlot *primitives.Slot
|
||||
}
|
||||
@@ -715,6 +716,17 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveDataColumn implements the same method in chain service
|
||||
func (c *ChainService) ReceiveDataColumn(dc blocks.VerifiedRODataColumn) error {
|
||||
c.DataColumns = append(c.DataColumns, dc)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveDataColumns implements the same method in chain service
|
||||
func (*ChainService) ReceiveDataColumns(_ []blocks.VerifiedRODataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TargetRootForEpoch mocks the same method in the chain service
|
||||
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
|
||||
return c.TargetRoot, nil
|
||||
|
||||
@@ -41,7 +41,6 @@ go_library(
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
|
||||
@@ -178,7 +178,7 @@ func VerifyAttestationNoVerifySignature(
|
||||
}
|
||||
}
|
||||
|
||||
return attestation.IsValidAttestationIndices(ctx, indexedAtt)
|
||||
return attestation.IsValidAttestationIndices(ctx, indexedAtt, params.BeaconConfig().MaxValidatorsPerCommittee, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
}
|
||||
|
||||
// ProcessAttestationNoVerifySignature processes the attestation without verifying the attestation signature. This
|
||||
@@ -243,7 +243,7 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBea
|
||||
ctx, span := trace.StartSpan(ctx, "core.VerifyIndexedAttestation")
|
||||
defer span.End()
|
||||
|
||||
if err := attestation.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
|
||||
if err := attestation.IsValidAttestationIndices(ctx, indexedAtt, params.BeaconConfig().MaxValidatorsPerCommittee, params.BeaconConfig().MaxCommitteesPerSlot); err != nil {
|
||||
return err
|
||||
}
|
||||
domain, err := signing.Domain(
|
||||
|
||||
@@ -229,13 +229,16 @@ func verifyBlobCommitmentCount(slot primitives.Slot, body interfaces.ReadOnlyBea
|
||||
if body.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
|
||||
kzgs, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if len(kzgs) > maxBlobsPerBlock {
|
||||
return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
|
||||
|
||||
commitmentCount, maxBlobsPerBlock := len(kzgs), params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if commitmentCount > maxBlobsPerBlock {
|
||||
return fmt.Errorf("too many kzg commitments in block: actual count %d - max allowed %d", commitmentCount, maxBlobsPerBlock)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -926,8 +926,10 @@ func TestVerifyBlobCommitmentCount(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
|
||||
|
||||
b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1)}}
|
||||
maxCommitmentsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())
|
||||
|
||||
b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, maxCommitmentsPerBlock+1)}}
|
||||
rb, err = consensusblocks.NewBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
|
||||
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: actual count %d - max allowed %d", maxCommitmentsPerBlock+1, maxCommitmentsPerBlock), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
|
||||
}
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
@@ -96,12 +95,30 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
|
||||
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
|
||||
}
|
||||
|
||||
func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
|
||||
currentEpoch := slots.ToEpoch(header.Header.Slot)
|
||||
fork, err := params.Fork(currentEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
|
||||
}
|
||||
|
||||
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
|
||||
// from the above method by not using fork data from the state and instead retrieving it
|
||||
// via the respective epoch.
|
||||
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
|
||||
currentEpoch := slots.ToEpoch(blk.Block().Slot())
|
||||
fork, err := forks.Fork(currentEpoch)
|
||||
fork, err := params.Fork(currentEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -200,7 +217,7 @@ func createAttestationSignatureBatch(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := attestation.IsValidAttestationIndices(ctx, ia); err != nil {
|
||||
if err := attestation.IsValidAttestationIndices(ctx, ia, params.BeaconConfig().MaxValidatorsPerCommittee, params.BeaconConfig().MaxCommitteesPerSlot); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices := ia.GetAttestingIndices()
|
||||
|
||||
@@ -37,8 +37,11 @@ const (
|
||||
// SingleAttReceived is sent after a single attestation object is received from gossip or rpc
|
||||
SingleAttReceived = 9
|
||||
|
||||
// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
|
||||
DataColumnSidecarReceived = 10
|
||||
|
||||
// BlockGossipReceived is sent after a block has been received from gossip or API that passes validation rules.
|
||||
BlockGossipReceived = 10
|
||||
BlockGossipReceived = 11
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -90,6 +93,11 @@ type SingleAttReceivedData struct {
|
||||
Attestation ethpb.Att
|
||||
}
|
||||
|
||||
// DataColumnSidecarReceivedData is the data sent with DataColumnSidecarReceived events.
|
||||
type DataColumnSidecarReceivedData struct {
|
||||
DataColumn *blocks.VerifiedRODataColumn
|
||||
}
|
||||
|
||||
// BlockGossipReceivedData is the data sent with BlockGossipReceived events.
|
||||
type BlockGossipReceivedData struct {
|
||||
// SignedBlock is the block that was received.
|
||||
|
||||
@@ -2,23 +2,33 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["upgrade.go"],
|
||||
srcs = [
|
||||
"transition.go",
|
||||
"upgrade.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/fulu",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/electra:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["upgrade_test.go"],
|
||||
srcs = [
|
||||
"transition_test.go",
|
||||
"upgrade_test.go",
|
||||
],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
|
||||
47
beacon-chain/core/fulu/transition.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package fulu
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
|
||||
if err := electra.ProcessEpoch(ctx, state); err != nil {
|
||||
return errors.Wrap(err, "could not process epoch in fulu transition")
|
||||
}
|
||||
return processProposerLookahead(ctx, state)
|
||||
}
|
||||
|
||||
func processProposerLookahead(ctx context.Context, state state.BeaconState) error {
|
||||
_, span := trace.StartSpan(ctx, "fulu.processProposerLookahead")
|
||||
defer span.End()
|
||||
|
||||
if state == nil || state.IsNil() {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
|
||||
lookAhead, err := state.ProposerLookahead()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get proposer lookahead")
|
||||
}
|
||||
lastEpochStart := len(lookAhead) - int(params.BeaconConfig().SlotsPerEpoch)
|
||||
copy(lookAhead[:lastEpochStart], lookAhead[params.BeaconConfig().SlotsPerEpoch:])
|
||||
lastEpoch := slots.ToEpoch(state.Slot()) + params.BeaconConfig().MinSeedLookahead + 1
|
||||
indices, err := helpers.ActiveValidatorIndices(ctx, state, lastEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lastEpochProposers, err := helpers.PrecomputeProposerIndices(state, indices, lastEpoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not precompute proposer indices")
|
||||
}
|
||||
copy(lookAhead[lastEpochStart:], lastEpochProposers)
|
||||
return state.SetProposerLookahead(lookAhead)
|
||||
}
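The lookahead update above is a rolling-window rotation: drop the oldest epoch of proposer indices, shift the remaining entries forward by one epoch, and write the proposers precomputed for the newest epoch into the freed tail. A small sketch of just that slice arithmetic, with made-up slot counts and proposer values:

package main

import "fmt"

// rotateLookahead mimics the copy-and-refill step in processProposerLookahead:
// shift everything forward by one epoch, then fill the tail with the freshly
// computed proposers for the last epoch in the window.
func rotateLookahead(lookahead []uint64, slotsPerEpoch int, lastEpochProposers []uint64) []uint64 {
	lastEpochStart := len(lookahead) - slotsPerEpoch
	copy(lookahead[:lastEpochStart], lookahead[slotsPerEpoch:]) // drop the oldest epoch
	copy(lookahead[lastEpochStart:], lastEpochProposers)        // append the newest epoch
	return lookahead
}

func main() {
	// Two epochs of 4 slots each: epoch N proposers 1-4, epoch N+1 proposers 5-8.
	lookahead := []uint64{1, 2, 3, 4, 5, 6, 7, 8}
	next := []uint64{9, 10, 11, 12} // proposers for epoch N+2
	fmt.Println(rotateLookahead(lookahead, 4, next))
	// Output: [5 6 7 8 9 10 11 12]
}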
|
||||
28
beacon-chain/core/fulu/transition_test.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package fulu_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/fulu"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
|
||||
func TestProcessEpoch_CanProcessFulu(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
|
||||
st, err := fulu.UpgradeToFulu(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
preLookahead, err := st.ProposerLookahead()
|
||||
require.NoError(t, err)
|
||||
err = fulu.ProcessEpoch(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
postLookahead, err := st.ProposerLookahead()
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, preLookahead[0], postLookahead[0])
|
||||
for i, v := range preLookahead[params.BeaconConfig().SlotsPerEpoch:] {
|
||||
require.Equal(t, v, postLookahead[i])
|
||||
}
|
||||
}
|
||||
@@ -1,18 +1,22 @@
|
||||
package fulu
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// UpgradeToFulu takes a generic pre-Fulu state and returns the upgraded Fulu version of that state.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/fork.md#upgrading-the-state
|
||||
func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -101,8 +105,12 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, beaconState, slots.ToEpoch(beaconState.Slot()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := &ethpb.BeaconStateElectra{
s := &ethpb.BeaconStateFulu{
|
||||
GenesisTime: beaconState.GenesisTime(),
|
||||
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
|
||||
Slot: beaconState.Slot(),
|
||||
@@ -163,6 +171,7 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
PendingDeposits: pendingDeposits,
|
||||
PendingPartialWithdrawals: pendingPartialWithdrawals,
|
||||
PendingConsolidations: pendingConsolidations,
|
||||
ProposerLookahead: proposerLookahead,
|
||||
}
|
||||
|
||||
// Need to cast the beaconState to use in helper functions
|
||||
|
||||
@@ -25,7 +25,7 @@ func TestUpgradeToFulu(t *testing.T) {
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
|
||||
preForkState := st.Copy()
|
||||
mSt, err := fulu.UpgradeToFulu(st)
|
||||
mSt, err := fulu.UpgradeToFulu(t.Context(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
|
||||
|
||||
@@ -277,10 +277,10 @@ type CommitteeAssignment struct {
|
||||
CommitteeIndex primitives.CommitteeIndex
|
||||
}
|
||||
|
||||
// verifyAssignmentEpoch verifies if the given epoch is valid for assignment based on the provided state.
|
||||
// VerifyAssignmentEpoch verifies if the given epoch is valid for assignment based on the provided state.
|
||||
// It checks if the epoch is not greater than the next epoch, and if the start slot of the epoch is greater
|
||||
// than or equal to the minimum valid start slot calculated based on the state's current slot and historical roots.
|
||||
func verifyAssignmentEpoch(epoch primitives.Epoch, state state.BeaconState) error {
|
||||
func VerifyAssignmentEpoch(epoch primitives.Epoch, state state.BeaconState) error {
|
||||
nextEpoch := time.NextEpoch(state)
|
||||
if epoch > nextEpoch {
|
||||
return fmt.Errorf("epoch %d can't be greater than next epoch %d", epoch, nextEpoch)
|
||||
@@ -308,7 +308,7 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
|
||||
defer span.End()
|
||||
|
||||
// Verify if the epoch is valid for assignment based on the provided state.
|
||||
if err := verifyAssignmentEpoch(epoch, state); err != nil {
|
||||
if err := VerifyAssignmentEpoch(epoch, state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
startSlot, err := slots.EpochStart(epoch)
|
||||
@@ -351,6 +351,61 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
|
||||
return proposerAssignments, nil
|
||||
}
|
||||
|
||||
// LiteAssignment is a lite version of CommitteeAssignment, and has committee length
|
||||
// and validator committee index instead of the full committee list
|
||||
type LiteAssignment struct {
|
||||
AttesterSlot primitives.Slot // slot in which to attest
|
||||
CommitteeIndex primitives.CommitteeIndex // position of the committee in the slot
|
||||
CommitteeLength uint64 // number of members in the committee
|
||||
ValidatorCommitteeIndex uint64 // validator’s offset inside the committee
|
||||
}
|
||||
|
||||
// PrecomputeCommittees returns an array indexed by (slot-startSlot)
|
||||
// whose elements are the beacon committees of that slot.
|
||||
func PrecomputeCommittees(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
startSlot primitives.Slot,
|
||||
) ([][][]primitives.ValidatorIndex, error) {
|
||||
cfg := params.BeaconConfig()
|
||||
out := make([][][]primitives.ValidatorIndex, cfg.SlotsPerEpoch)
|
||||
|
||||
for relativeSlot := primitives.Slot(0); relativeSlot < cfg.SlotsPerEpoch; relativeSlot++ {
|
||||
slot := startSlot + relativeSlot
|
||||
|
||||
comms, err := BeaconCommittees(ctx, st, slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "BeaconCommittees failed at slot %d", slot)
|
||||
}
|
||||
out[relativeSlot] = comms
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// AssignmentForValidator scans the cached committees once
|
||||
// and returns the duty for a single validator.
|
||||
func AssignmentForValidator(
|
||||
bySlot [][][]primitives.ValidatorIndex,
|
||||
startSlot primitives.Slot,
|
||||
vIdx primitives.ValidatorIndex,
|
||||
) *LiteAssignment {
|
||||
for relativeSlot, committees := range bySlot {
|
||||
for cIdx, committee := range committees {
|
||||
for pos, member := range committee {
|
||||
if member == vIdx {
|
||||
return &LiteAssignment{
|
||||
AttesterSlot: startSlot + primitives.Slot(relativeSlot),
|
||||
CommitteeIndex: primitives.CommitteeIndex(cIdx),
|
||||
CommitteeLength: uint64(len(committee)),
|
||||
ValidatorCommitteeIndex: uint64(pos),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil // validator is not scheduled this epoch
|
||||
}
|
||||
|
||||
// CommitteeAssignments calculates committee assignments for each validator during the specified epoch.
|
||||
// It retrieves active validator indices, determines the number of committees per slot, and computes
|
||||
// assignments for each validator based on their presence in the provided validators slice.
|
||||
@@ -359,7 +414,7 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
|
||||
defer span.End()
|
||||
|
||||
// Verify if the epoch is valid for assignment based on the provided state.
|
||||
if err := verifyAssignmentEpoch(epoch, state); err != nil {
|
||||
if err := VerifyAssignmentEpoch(epoch, state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
startSlot, err := slots.EpochStart(epoch)
|
||||
@@ -500,21 +555,31 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Skip cache update if the key already exists
|
||||
_, ok := proposerIndicesCache.ProposerIndices(epoch, [32]byte(root))
|
||||
if ok {
|
||||
return nil
|
||||
}
|
||||
indices, err := ActiveValidatorIndices(ctx, state, epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposerIndices, err := PrecomputeProposerIndices(state, indices, epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
|
||||
return errors.New("invalid proposer length returned from state")
|
||||
var proposerIndices []primitives.ValidatorIndex
|
||||
// use the state if post fulu (EIP-7917)
|
||||
if state.Version() >= version.Fulu {
|
||||
lookAhead, err := state.ProposerLookahead()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get proposer lookahead")
|
||||
}
|
||||
proposerIndices = lookAhead[:params.BeaconConfig().SlotsPerEpoch]
|
||||
} else {
|
||||
// Skip cache update if the key already exists
|
||||
_, ok := proposerIndicesCache.ProposerIndices(epoch, [32]byte(root))
|
||||
if ok {
|
||||
return nil
|
||||
}
|
||||
indices, err := ActiveValidatorIndices(ctx, state, epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposerIndices, err = PrecomputeProposerIndices(state, indices, epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
|
||||
return errors.New("invalid proposer length returned from state")
|
||||
}
|
||||
}
|
||||
// This is here to deal with tests only
|
||||
var indicesArray [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex
|
||||
@@ -601,6 +666,25 @@ func ComputeCommittee(
|
||||
return shuffledList[start:end], nil
|
||||
}
|
||||
|
||||
// InitializeProposerLookahead computes the list of the proposer indices for the next couple of epochs.
|
||||
func InitializeProposerLookahead(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]uint64, error) {
|
||||
lookAhead := make([]uint64, 0, uint64(params.BeaconConfig().MinSeedLookahead+1)*uint64(params.BeaconConfig().SlotsPerEpoch))
|
||||
indices, err := ActiveValidatorIndices(ctx, state, epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get active indices")
|
||||
}
|
||||
for i := range params.BeaconConfig().MinSeedLookahead + 1 {
|
||||
proposerIndices, err := PrecomputeProposerIndices(state, indices, epoch+i)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute proposer indices")
|
||||
}
|
||||
for _, proposerIndex := range proposerIndices {
|
||||
lookAhead = append(lookAhead, uint64(proposerIndex))
|
||||
}
|
||||
}
|
||||
return lookAhead, nil
|
||||
}
|
||||
|
||||
// PrecomputeProposerIndices computes proposer indices of the current epoch and returns a list of proposer indices,
|
||||
// the index of the list represents the slot number.
|
||||
func PrecomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
|
||||
|
||||
@@ -871,3 +871,48 @@ func TestBeaconCommitteesFromCache(t *testing.T) {
|
||||
assert.DeepEqual(t, committees[idx], committee)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrecomputeCommittees_HappyPath(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
start := primitives.Slot(100)
|
||||
ctx := context.Background()
|
||||
st, _ := util.DeterministicGenesisState(t, 256)
|
||||
|
||||
got, err := helpers.PrecomputeCommittees(ctx, st, start)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(got), int(cfg.SlotsPerEpoch), "outer slice length mismatch")
|
||||
|
||||
for i := range got {
|
||||
expSlot := start + primitives.Slot(i)
|
||||
comms, err := helpers.BeaconCommittees(ctx, st, expSlot)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, comms, got[i])
|
||||
}
|
||||
}
|
||||
|
||||
func TestAssignmentForValidator(t *testing.T) {
|
||||
start := primitives.Slot(200)
|
||||
bySlot := [][][]primitives.ValidatorIndex{
|
||||
{{1, 2, 3}},
|
||||
{{7, 8, 9}},
|
||||
}
|
||||
vIdx := primitives.ValidatorIndex(8)
|
||||
|
||||
got := helpers.AssignmentForValidator(bySlot, start, vIdx)
|
||||
|
||||
require.NotNil(t, got)
|
||||
require.Equal(t, start+1, got.AttesterSlot)
|
||||
require.Equal(t, primitives.CommitteeIndex(0), got.CommitteeIndex)
|
||||
require.Equal(t, uint64(3), got.CommitteeLength)
|
||||
require.Equal(t, uint64(1), got.ValidatorCommitteeIndex)
|
||||
|
||||
t.Run("Not Found", func(t *testing.T) {
|
||||
start = primitives.Slot(300)
|
||||
bySlot = [][][]primitives.ValidatorIndex{
|
||||
{{4, 5, 6}},
|
||||
}
|
||||
got = helpers.AssignmentForValidator(bySlot, start, primitives.ValidatorIndex(99))
|
||||
require.IsNil(t, got)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := &ethpb.SyncCommittee{
|
||||
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
|
||||
@@ -299,9 +299,29 @@ func ProposerIndexAtSlotFromCheckpoint(c *forkchoicetypes.Checkpoint, slot primi
|
||||
return proposerIndices[slot%params.BeaconConfig().SlotsPerEpoch], nil
|
||||
}
|
||||
|
||||
func beaconProposerIndexAtSlotFulu(state state.ReadOnlyBeaconState, slot primitives.Slot) (primitives.ValidatorIndex, error) {
|
||||
e := slots.ToEpoch(slot)
|
||||
stateEpoch := slots.ToEpoch(state.Slot())
|
||||
if e < stateEpoch || e > stateEpoch+1 {
|
||||
return 0, errors.Errorf("slot %d is not in the current epoch %d or the next epoch", slot, stateEpoch)
|
||||
}
|
||||
lookAhead, err := state.ProposerLookahead()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get proposer lookahead")
|
||||
}
|
||||
if e == stateEpoch {
|
||||
return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch], nil
|
||||
}
|
||||
// The caller is requesting the proposer for the next epoch
|
||||
return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch+params.BeaconConfig().SlotsPerEpoch], nil
|
||||
}
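beaconProposerIndexAtSlotFulu above simply indexes into the two-epoch lookahead vector: a slot in the state's current epoch reads from the first SLOTS_PER_EPOCH entries, a slot in the next epoch from the second SLOTS_PER_EPOCH entries. A tiny sketch of that index arithmetic (slotsPerEpoch and the slot numbers are illustrative; the real function additionally rejects slots outside this two-epoch window):

package main

import "fmt"

// lookaheadIndex mirrors the indexing in beaconProposerIndexAtSlotFulu for a
// lookahead vector that stores two epochs of proposers back to back.
func lookaheadIndex(slot, stateSlot, slotsPerEpoch uint64) uint64 {
	slotEpoch, stateEpoch := slot/slotsPerEpoch, stateSlot/slotsPerEpoch
	if slotEpoch == stateEpoch {
		return slot % slotsPerEpoch // current epoch: first half of the vector
	}
	return slot%slotsPerEpoch + slotsPerEpoch // next epoch: second half
}

func main() {
	// State at slot 100 (epoch 3): slot 97 maps to index 1 and slot 130 (epoch 4)
	// to index 34, the same indices exercised by TestBeaconProposerIndexAtSlotFulu.
	fmt.Println(lookaheadIndex(97, 100, 32), lookaheadIndex(130, 100, 32))
	// Output: 1 34
}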
|
||||
|
||||
// BeaconProposerIndexAtSlot returns proposer index at the given slot from the
|
||||
// point of view of the given state as head state
|
||||
func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) (primitives.ValidatorIndex, error) {
|
||||
if state.Version() >= version.Fulu {
|
||||
return beaconProposerIndexAtSlotFulu(state, slot)
|
||||
}
|
||||
e := slots.ToEpoch(slot)
|
||||
// The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31)
|
||||
// For simplicity, the node will skip caching of genesis epoch.
|
||||
|
||||
@@ -1171,3 +1171,29 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
|
||||
// Sanity check that MinActivationBalance equals (pre-electra) MaxEffectiveBalance
|
||||
assert.Equal(t, params.BeaconConfig().MinActivationBalance, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
|
||||
func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
|
||||
lookahead := make([]uint64, 64)
|
||||
lookahead[0] = 15
|
||||
lookahead[1] = 16
|
||||
lookahead[34] = 42
|
||||
pbState := ethpb.BeaconStateFulu{
|
||||
Slot: 100,
|
||||
ProposerLookahead: lookahead,
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoFulu(&pbState)
|
||||
require.NoError(t, err)
|
||||
idx, err := helpers.BeaconProposerIndexAtSlot(t.Context(), st, 96)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.ValidatorIndex(15), idx)
|
||||
idx, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 97)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.ValidatorIndex(16), idx)
|
||||
idx, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 130)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.ValidatorIndex(42), idx)
|
||||
_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 95)
|
||||
require.ErrorContains(t, "slot 95 is not in the current epoch 3 or the next epoch", err)
|
||||
_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 160)
|
||||
require.ErrorContains(t, "slot 160 is not in the current epoch 3 or the next epoch", err)
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
"info.go",
|
||||
"metrics.go",
|
||||
"p2p_interface.go",
|
||||
"peer_sampling.go",
|
||||
"reconstruction.go",
|
||||
"validator.go",
|
||||
"verification.go",
|
||||
@@ -44,6 +45,7 @@ go_test(
|
||||
"das_core_test.go",
|
||||
"info_test.go",
|
||||
"p2p_interface_test.go",
|
||||
"peer_sampling_test.go",
|
||||
"reconstruction_test.go",
|
||||
"utils_test.go",
|
||||
"validator_test.go",
|
||||
|
||||
56
beacon-chain/core/peerdas/peer_sampling.go
Normal file
@@ -0,0 +1,56 @@
package peerdas

import (
	"math/big"

	"github.com/OffchainLabs/prysm/v6/config/params"
)

// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures, the
// number of samples we should actually query from peers.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/peer-sampling.md#get_extended_sample_count
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
	// Retrieve the columns count
	columnsCount := params.BeaconConfig().NumberOfColumns

	// If half of the columns are missing, we are able to reconstruct the data.
	// If half of the columns + 1 are missing, we are not able to reconstruct the data.
	// This is the smallest worst case.
	worstCaseMissing := columnsCount/2 + 1

	// Compute the false positive threshold.
	falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)

	var sampleCount uint64

	// Finally, compute the extended sample count.
	for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
		if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
			break
		}
	}

	return sampleCount
}

// HypergeomCDF computes the hypergeometric cumulative distribution function.
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
func HypergeomCDF(k, M, n, N uint64) float64 {
	denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
	denominator := new(big.Float).SetInt(denominatorInt)

	rBig := big.NewFloat(0)

	for i := uint64(0); i < k+1; i++ {
		a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
		b := new(big.Int).Binomial(int64(M-n), int64(N-i))
		numeratorInt := new(big.Int).Mul(a, b)
		numerator := new(big.Float).SetInt(numeratorInt)
		item := new(big.Float).Quo(numerator, denominator)
		rBig.Add(rBig, item)
	}

	r, _ := rBig.Float64()

	return r
}
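For reference, the loop in HypergeomCDF sums the hypergeometric probability mass terms up to k; in the code's notation (population M, n successes in the population, N draws), the returned value is

P(X \le k) = \sum_{i=0}^{k} \binom{n}{i} \binom{M-n}{N-i} \Big/ \binom{M}{N}

ExtendedSampleCount then grows the query size until this CDF, evaluated at the allowed number of failures, drops below the false-positive threshold computed for zero failures.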
60
beacon-chain/core/peerdas/peer_sampling_test.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func TestExtendedSampleCount(t *testing.T) {
|
||||
const samplesPerSlot = 16
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
allowedMissings uint64
|
||||
extendedSampleCount uint64
|
||||
}{
|
||||
{name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
|
||||
{name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
|
||||
{name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
|
||||
{name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
|
||||
{name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
|
||||
{name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
|
||||
{name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
|
||||
{name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
|
||||
{name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
|
||||
{name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
|
||||
{name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
|
||||
{name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
|
||||
{name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
|
||||
{name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
|
||||
{name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
|
||||
{name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
|
||||
{name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
|
||||
{name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
|
||||
{name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
|
||||
{name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
|
||||
{name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
|
||||
require.Equal(t, tc.extendedSampleCount, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHypergeomCDF(t *testing.T) {
|
||||
// Notation per https://en.wikipedia.org/wiki/Hypergeometric_distribution
// Population size: 128, number of successes in population: 65, sample size: 16, number of successes in sample: 5
// Expected result: ~0.0797
|
||||
const (
|
||||
expected = 0.0796665913283742
|
||||
margin = 0.000001
|
||||
)
|
||||
|
||||
actual := peerdas.HypergeomCDF(5, 128, 65, 16)
|
||||
require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
|
||||
}
|
||||
@@ -44,7 +44,7 @@ func TestValidatorsCustodyRequirement(t *testing.T) {
|
||||
validatorsIndex[primitives.ValidatorIndex(i)] = true
|
||||
}
|
||||
|
||||
beaconState, err := state_native.InitializeFromProtoFulu(&ethpb.BeaconStateElectra{Validators: validators})
beaconState, err := state_native.InitializeFromProtoFulu(&ethpb.BeaconStateFulu{Validators: validators})
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := peerdas.ValidatorsCustodyRequirement(beaconState, validatorsIndex)
|
||||
|
||||
@@ -4,7 +4,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"domain.go",
|
||||
"signature.go",
|
||||
"signing_root.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing",
|
||||
@@ -25,7 +24,6 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"domain_test.go",
|
||||
"signature_test.go",
|
||||
"signing_root_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
package signing
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var ErrNilRegistration = errors.New("nil signed registration")
|
||||
|
||||
// VerifyRegistrationSignature verifies the signature of a validator's registration.
|
||||
func VerifyRegistrationSignature(
|
||||
sr *ethpb.SignedValidatorRegistrationV1,
|
||||
) error {
|
||||
if sr == nil || sr.Message == nil {
|
||||
return ErrNilRegistration
|
||||
}
|
||||
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
// Per spec, we want the fork version and genesis validator to be nil.
|
||||
// Which is genesis value and zero by default.
|
||||
sd, err := ComputeDomain(
|
||||
d,
|
||||
nil, /* fork version */
|
||||
nil /* genesis val root */)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := VerifySigningRoot(sr.Message, sr.Message.Pubkey, sr.Signature, sd); err != nil {
|
||||
return ErrSigFailedToVerify
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package signing_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func TestVerifyRegistrationSignature(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
reg := &ethpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
|
||||
GasLimit: 123456,
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
Pubkey: sk.PublicKey().Marshal(),
|
||||
}
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
domain, err := signing.ComputeDomain(d, nil, nil)
|
||||
require.NoError(t, err)
|
||||
sr, err := signing.ComputeSigningRoot(reg, domain)
|
||||
require.NoError(t, err)
|
||||
sk.Sign(sr[:]).Marshal()
|
||||
|
||||
sReg := &ethpb.SignedValidatorRegistrationV1{
|
||||
Message: reg,
|
||||
Signature: sk.Sign(sr[:]).Marshal(),
|
||||
}
|
||||
require.NoError(t, signing.VerifyRegistrationSignature(sReg))
|
||||
|
||||
sReg.Signature = []byte("bad")
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrSigFailedToVerify)
|
||||
|
||||
sReg.Message = nil
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrNilRegistration)
|
||||
}
|
||||
@@ -53,6 +53,11 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
|
||||
return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
|
||||
}
|
||||
|
||||
// PeerDASIsActive checks whether peerDAS is active at the provided slot.
|
||||
func PeerDASIsActive(slot primitives.Slot) bool {
|
||||
return params.FuluEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().FuluForkEpoch
|
||||
}
|
||||
|
||||
// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
|
||||
// Spec code:
|
||||
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH
|
||||
|
||||
@@ -303,7 +303,11 @@ func ProcessSlotsCore(ctx context.Context, span trace.Span, state state.BeaconSt
|
||||
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
|
||||
var err error
|
||||
if time.CanProcessEpoch(state) {
|
||||
if state.Version() >= version.Electra {
|
||||
if state.Version() >= version.Fulu {
|
||||
if err = fulu.ProcessEpoch(ctx, state); err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
|
||||
}
|
||||
} else if state.Version() >= version.Electra {
|
||||
if err = electra.ProcessEpoch(ctx, state); err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
|
||||
}
|
||||
@@ -377,7 +381,7 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
|
||||
}
|
||||
|
||||
if time.CanUpgradeToFulu(slot) {
|
||||
state, err = fulu.UpgradeToFulu(state)
|
||||
state, err = fulu.UpgradeToFulu(ctx, state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, err
|
||||
|
||||
@@ -3,22 +3,27 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"availability.go",
|
||||
"cache.go",
|
||||
"availability_blobs.go",
|
||||
"availability_columns.go",
|
||||
"blob_cache.go",
|
||||
"data_column_cache.go",
|
||||
"iface.go",
|
||||
"mock.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/das",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//runtime/logging:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
@@ -27,13 +32,18 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"availability_test.go",
|
||||
"cache_test.go",
|
||||
"availability_blobs_test.go",
|
||||
"availability_columns_test.go",
|
||||
"blob_cache_test.go",
|
||||
"data_column_cache_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
@@ -41,6 +51,7 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -20,16 +20,16 @@ var (
|
||||
errMixedRoots = errors.New("BlobSidecars must all be for the same block")
|
||||
)
|
||||
|
||||
// LazilyPersistentStore is an implementation of AvailabilityStore to be used when batch syncing.
|
||||
// LazilyPersistentStoreBlob is an implementation of AvailabilityStore to be used when batch syncing.
|
||||
// This implementation will hold any blobs passed to Persist until the IsDataAvailable is called for their
|
||||
// block, at which time they will undergo full verification and be saved to the disk.
|
||||
type LazilyPersistentStore struct {
|
||||
type LazilyPersistentStoreBlob struct {
|
||||
store *filesystem.BlobStorage
|
||||
cache *cache
|
||||
cache *blobCache
|
||||
verifier BlobBatchVerifier
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &LazilyPersistentStore{}
|
||||
var _ AvailabilityStore = &LazilyPersistentStoreBlob{}
|
||||
|
||||
// BlobBatchVerifier enables LazyAvailabilityStore to manage the verification process
|
||||
// going from ROBlob->VerifiedROBlob, while avoiding the decision of which individual verifications
|
||||
@@ -42,10 +42,10 @@ type BlobBatchVerifier interface {
|
||||
|
||||
// NewLazilyPersistentStore creates a new LazilyPersistentStore. This constructor should always be used
|
||||
// when creating a LazilyPersistentStore because it needs to initialize the cache under the hood.
|
||||
func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchVerifier) *LazilyPersistentStore {
|
||||
return &LazilyPersistentStore{
|
||||
func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchVerifier) *LazilyPersistentStoreBlob {
|
||||
return &LazilyPersistentStoreBlob{
|
||||
store: store,
|
||||
cache: newCache(),
|
||||
cache: newBlobCache(),
|
||||
verifier: verifier,
|
||||
}
|
||||
}
|
||||
@@ -53,25 +53,31 @@ func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchV
|
||||
// Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
|
||||
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
|
||||
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
|
||||
if len(sc) == 0 {
|
||||
func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
|
||||
if len(sidecars) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(sc) > 1 {
|
||||
first := sc[0].BlockRoot()
|
||||
for i := 1; i < len(sc); i++ {
|
||||
if first != sc[i].BlockRoot() {
|
||||
|
||||
blobSidecars, err := blocks.BlobSidecarsFromSidecars(sidecars)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob sidecars from sidecars")
|
||||
}
|
||||
|
||||
if len(blobSidecars) > 1 {
|
||||
firstRoot := blobSidecars[0].BlockRoot()
|
||||
for _, sidecar := range blobSidecars[1:] {
|
||||
if sidecar.BlockRoot() != firstRoot {
|
||||
return errMixedRoots
|
||||
}
|
||||
}
|
||||
}
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(sc[0].Slot()), slots.ToEpoch(current)) {
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(blobSidecars[0].Slot()), slots.ToEpoch(current)) {
|
||||
return nil
|
||||
}
|
||||
key := keyFromSidecar(sc[0])
|
||||
key := keyFromSidecar(blobSidecars[0])
|
||||
entry := s.cache.ensure(key)
|
||||
for i := range sc {
|
||||
if err := entry.stash(&sc[i]); err != nil {
|
||||
for _, blobSidecar := range blobSidecars {
|
||||
if err := entry.stash(&blobSidecar); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -80,7 +86,7 @@ func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.RO

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// BlobSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
	blockCommitments, err := commitmentsToCheck(b, current)
	if err != nil {
		return errors.Wrapf(err, "could not check data availability for block %#x", b.Root())
@@ -116,9 +116,11 @@ func TestLazilyPersistent_Missing(t *testing.T) {
	ctx := context.Background()
	store := filesystem.NewEphemeralBlobStorage(t)

	blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
	blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)

	mbv := &mockBlobBatchVerifier{t: t, scs: scs}
	scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)

	mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
	as := NewLazilyPersistentStore(store, mbv)

	// Only one commitment persisted, should return error with other indices
@@ -141,12 +143,14 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
	ctx := context.Background()
	store := filesystem.NewEphemeralBlobStorage(t)

	blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
	blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)

	mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
	scs[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
	blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
	as := NewLazilyPersistentStore(store, mbv)

	scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)

	// Only one commitment persisted, should return error with other indices
	require.NoError(t, as.Persist(1, scs[0]))
	err := as.IsDataAvailable(ctx, 1, blk)
@@ -155,7 +159,10 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
}

func TestLazyPersistOnceCommitted(t *testing.T) {
	_, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
	_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)

	scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)

	as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
	// stashes as expected
	require.NoError(t, as.Persist(1, scs...))
@@ -163,10 +170,13 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
	require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)

	// ignores index out of bound
	scs[0].Index = 6
	require.ErrorIs(t, as.Persist(1, scs[0]), errIndexOutOfBounds)
	blobSidecars[0].Index = 6
	require.ErrorIs(t, as.Persist(1, blocks.NewSidecarFromBlobSidecar(blobSidecars[0])), errIndexOutOfBounds)

	_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)

	more := blocks.NewSidecarsFromBlobSidecars(moreBlobSidecars)

	_, more := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
	// ignores sidecars before the retention period
	slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
	require.NoError(t, err)
beacon-chain/das/availability_columns.go (new file, 208 lines)
@@ -0,0 +1,208 @@
package das

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
)

// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to Persist until IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to disk.
type LazilyPersistentStoreColumn struct {
	store                  *filesystem.DataColumnStorage
	nodeID                 enode.ID
	cache                  *dataColumnCache
	custodyInfo            *peerdas.CustodyInfo
	newDataColumnsVerifier verification.NewDataColumnsVerifier
}

var _ AvailabilityStore = &LazilyPersistentStoreColumn{}

// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
// they are all available, the interface takes a slice of data column sidecars.
type DataColumnsVerifier interface {
	VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
}

// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
func NewLazilyPersistentStoreColumn(store *filesystem.DataColumnStorage, nodeID enode.ID, newDataColumnsVerifier verification.NewDataColumnsVerifier, custodyInfo *peerdas.CustodyInfo) *LazilyPersistentStoreColumn {
	return &LazilyPersistentStoreColumn{
		store:                  store,
		nodeID:                 nodeID,
		cache:                  newDataColumnCache(),
		custodyInfo:            custodyInfo,
		newDataColumnsVerifier: newDataColumnsVerifier,
	}
}
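// Illustrative sketch, not part of the change set: the intended call pattern for this store during
// batch sync. The function and variable names below are hypothetical; only
// NewLazilyPersistentStoreColumn, Persist, IsDataAvailable and blocks.NewSidecarFromDataColumnSidecar
// come from the code in this diff.
func exampleColumnAvailabilityFlow(ctx context.Context, s *LazilyPersistentStoreColumn, current primitives.Slot, block blocks.ROBlock, columns []blocks.RODataColumn) error {
	// Wrap the data column sidecars in the generic ROSidecar type accepted by Persist.
	sidecars := make([]blocks.ROSidecar, 0, len(columns))
	for _, column := range columns {
		sidecars = append(sidecars, blocks.NewSidecarFromDataColumnSidecar(column))
	}

	// Persist only caches the sidecars; nothing is written to disk yet.
	if err := s.Persist(current, sidecars...); err != nil {
		return err
	}

	// IsDataAvailable verifies the cached sidecars against the block and saves them to disk.
	return s.IsDataAvailable(ctx, current, block)
}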
// Persist adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
	if len(sidecars) == 0 {
		return nil
	}

	dataColumnSidecars, err := blocks.DataColumnSidecarsFromSidecars(sidecars)
	if err != nil {
		return errors.Wrap(err, "data column sidecars from sidecars")
	}

	// It is safe to retrieve the first sidecar.
	firstSidecar := dataColumnSidecars[0]

	if len(sidecars) > 1 {
		firstRoot := firstSidecar.BlockRoot()
		for _, sidecar := range dataColumnSidecars[1:] {
			if sidecar.BlockRoot() != firstRoot {
				return errMixedRoots
			}
		}
	}

	firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
	if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
		return nil
	}

	key := cacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
	entry := s.cache.ensure(key)

	for _, sidecar := range dataColumnSidecars {
		if err := entry.stash(&sidecar); err != nil {
			return errors.Wrap(err, "stash DataColumnSidecar")
		}
	}

	return nil
}
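// Minimal sketch of the retention guard Persist applies above: sidecars whose epoch falls outside
// the data availability period relative to the current slot are silently dropped. The helper name
// is hypothetical; params.WithinDAPeriod and slots.ToEpoch are the real calls used above.
func withinRetention(sidecarSlot, currentSlot primitives.Slot) bool {
	return params.WithinDAPeriod(slots.ToEpoch(sidecarSlot), slots.ToEpoch(currentSlot))
}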
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, currentSlot primitives.Slot, block blocks.ROBlock) error {
	blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
	if err != nil {
		return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
	}

	// Return early for blocks that do not have any commitments.
	if blockCommitments.count() == 0 {
		return nil
	}

	// Get the root of the block.
	blockRoot := block.Root()

	// Build the cache key for the block.
	key := cacheKey{slot: block.Block().Slot(), root: blockRoot}

	// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
	entry := s.cache.ensure(key)

	// Delete the cache entry for the block at the end.
	defer s.cache.delete(key)

	// Set the disk summary for the block in the cache entry.
	entry.setDiskSummary(s.store.Summary(blockRoot))

	// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
	// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
	// ignore their response and decrease their peer score.
	roDataColumns, err := entry.filter(blockRoot, blockCommitments)
	if err != nil {
		return errors.Wrap(err, "entry filter")
	}

	// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
	verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)

	if err := verifier.ValidFields(); err != nil {
		return errors.Wrap(err, "valid")
	}

	if err := verifier.SidecarInclusionProven(); err != nil {
		return errors.Wrap(err, "sidecar inclusion proven")
	}

	if err := verifier.SidecarKzgProofVerified(); err != nil {
		return errors.Wrap(err, "sidecar KZG proof verified")
	}

	verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
	if err != nil {
		return errors.Wrap(err, "verified RO data columns - should never happen")
	}

	if err := s.store.Save(verifiedRoDataColumns); err != nil {
		return errors.Wrap(err, "save data column sidecars")
	}

	return nil
}
// fullCommitmentsToCheck returns the commitments to check for a given block.
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
	// Return early for blocks that are pre-Fulu.
	if block.Version() < version.Fulu {
		return &safeCommitmentsArray{}, nil
	}

	// Compute the block epoch.
	blockSlot := block.Block().Slot()
	blockEpoch := slots.ToEpoch(blockSlot)

	// Compute the current epoch.
	currentEpoch := slots.ToEpoch(currentSlot)

	// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
	if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
		return &safeCommitmentsArray{}, nil
	}

	// Retrieve the KZG commitments for the block.
	kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "blob KZG commitments")
	}

	// Return early if there are no commitments in the block.
	if len(kzgCommitments) == 0 {
		return &safeCommitmentsArray{}, nil
	}

	// Retrieve the custody group count.
	custodyGroupCount := s.custodyInfo.ActualGroupCount()

	// Retrieve peer info.
	peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
	if err != nil {
		return nil, errors.Wrap(err, "peer info")
	}

	// Create a safe commitments array for the custody columns.
	commitmentsArray := &safeCommitmentsArray{}
	commitmentsArraySize := uint64(len(commitmentsArray))

	for column := range peerInfo.CustodyColumns {
		if column >= commitmentsArraySize {
			return nil, errors.Errorf("custody column index %d too high (max allowed %d) - should never happen", column, commitmentsArraySize)
		}

		commitmentsArray[column] = kzgCommitments
	}

	return commitmentsArray, nil
}
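// Illustrative sketch (hypothetical helper): the shape fullCommitmentsToCheck produces for a node
// that custodies columns 1 and 5 of a block with the given KZG commitments. Every custodied column
// carries the block's full commitment list; all other entries stay nil.
func exampleCustodyCommitments(kzgCommitments [][]byte) *safeCommitmentsArray {
	array := &safeCommitmentsArray{}
	for _, column := range []uint64{1, 5} {
		array[column] = kzgCommitments
	}
	// array.count() == 2 and array.nonEmptyIndices() == map[uint64]bool{1: true, 5: true}.
	return array
}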
beacon-chain/das/availability_columns_test.go (new file, 319 lines)
@@ -0,0 +1,319 @@
package das

import (
	"context"
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

var commitments = [][]byte{
	bytesutil.PadTo([]byte("a"), 48),
	bytesutil.PadTo([]byte("b"), 48),
	bytesutil.PadTo([]byte("c"), 48),
	bytesutil.PadTo([]byte("d"), 48),
}

func TestPersist(t *testing.T) {
	t.Run("no sidecars", func(t *testing.T) {
		dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
		lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
		err := lazilyPersistentStoreColumns.Persist(0)
		require.NoError(t, err)
		require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
	})

	t.Run("mixed roots", func(t *testing.T) {
		dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

		dataColumnParamsByBlockRoot := []util.DataColumnParam{
			{Slot: 1, Index: 1},
			{Slot: 2, Index: 2},
		}

		roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
		lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})

		err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
		require.ErrorIs(t, err, errMixedRoots)
		require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
	})

	t.Run("outside DA period", func(t *testing.T) {
		dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

		dataColumnParamsByBlockRoot := []util.DataColumnParam{
			{Slot: 1, Index: 1},
		}

		roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
		lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})

		err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
		require.NoError(t, err)
		require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
	})

	t.Run("nominal", func(t *testing.T) {
		const slot = 42
		dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

		dataColumnParamsByBlockRoot := []util.DataColumnParam{
			{Slot: slot, Index: 1},
			{Slot: slot, Index: 5},
		}

		roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
		lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})

		err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
		require.NoError(t, err)
		require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))

		key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
		entry, ok := lazilyPersistentStoreColumns.cache.entries[key]
		require.Equal(t, true, ok)

		// A call to Persist does NOT save the sidecars to disk.
		require.Equal(t, uint64(0), entry.diskSummary.Count())

		require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
		require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])

		for i, roDataColumn := range entry.scs {
			if map[int]bool{1: true, 5: true}[i] {
				continue
			}

			require.IsNil(t, roDataColumn)
		}
	})
}

func TestIsDataAvailable(t *testing.T) {
	newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
		return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
	}

	ctx := context.Background()

	t.Run("without commitments", func(t *testing.T) {
		signedBeaconBlockFulu := util.NewBeaconBlockFulu()
		signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)

		dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
		lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})

		err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
		require.NoError(t, err)
	})

	t.Run("with commitments", func(t *testing.T) {
		signedBeaconBlockFulu := util.NewBeaconBlockFulu()
		signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
		signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
		block := signedRoBlock.Block()
		slot := block.Slot()
		proposerIndex := block.ProposerIndex()
		parentRoot := block.ParentRoot()
		stateRoot := block.StateRoot()
		bodyRoot, err := block.Body().HashTreeRoot()
		require.NoError(t, err)

		root := signedRoBlock.Root()

		dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
		lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})

		indices := [...]uint64{1, 17, 87, 102}
		dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
		for _, index := range indices {
			dataColumnParams := util.DataColumnParam{
				Index:          index,
				KzgCommitments: commitments,

				Slot:          slot,
				ProposerIndex: proposerIndex,
				ParentRoot:    parentRoot[:],
				StateRoot:     stateRoot[:],
				BodyRoot:      bodyRoot[:],
			}

			dataColumnsParams = append(dataColumnsParams, dataColumnParams)
		}

		_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)

		key := cacheKey{root: root}
		entry := lazilyPersistentStoreColumns.cache.ensure(key)
		defer lazilyPersistentStoreColumns.cache.delete(key)

		for _, verifiedRoDataColumn := range verifiedRoDataColumns {
			err := entry.stash(&verifiedRoDataColumn.RODataColumn)
			require.NoError(t, err)
		}

		err = lazilyPersistentStoreColumns.IsDataAvailable(ctx, slot, signedRoBlock)
		require.NoError(t, err)

		actual, err := dataColumnStorage.Get(root, indices[:])
		require.NoError(t, err)

		summary := dataColumnStorage.Summary(root)
		require.Equal(t, uint64(len(indices)), summary.Count())
		require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
	})
}

func TestFullCommitmentsToCheck(t *testing.T) {
	windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
	require.NoError(t, err)

	testCases := []struct {
		name        string
		commitments [][]byte
		block       func(*testing.T) blocks.ROBlock
		slot        primitives.Slot
	}{
		{
			name: "Pre-Fulu block",
			block: func(t *testing.T) blocks.ROBlock {
				return newSignedRoBlock(t, util.NewBeaconBlockElectra())
			},
		},
		{
			name: "Commitments outside data availability window",
			block: func(t *testing.T) blocks.ROBlock {
				beaconBlockElectra := util.NewBeaconBlockElectra()

				// Block is from slot 0, "current slot" is window size +1 (so outside the window)
				beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments

				return newSignedRoBlock(t, beaconBlockElectra)
			},
			slot: windowSlots + 1,
		},
		{
			name: "Commitments within data availability window",
			block: func(t *testing.T) blocks.ROBlock {
				signedBeaconBlockFulu := util.NewBeaconBlockFulu()
				signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
				signedBeaconBlockFulu.Block.Slot = 100

				return newSignedRoBlock(t, signedBeaconBlockFulu)
			},
			commitments: commitments,
			slot:        100,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			resetFlags := flags.Get()
			gFlags := new(flags.GlobalFlags)
			gFlags.SubscribeAllDataSubnets = true
			flags.Init(gFlags)
			defer flags.Init(resetFlags)

			b := tc.block(t)
			s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, &peerdas.CustodyInfo{})

			commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
			require.NoError(t, err)

			for _, commitments := range commitmentsArray {
				require.DeepEqual(t, tc.commitments, commitments)
			}
		})
	}
}

func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, parameters []util.DataColumnParam) ([]blocks.ROSidecar, []blocks.RODataColumn) {
	roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, parameters)

	roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
	for _, roDataColumn := range roDataColumns {
		roSidecars = append(roSidecars, blocks.NewSidecarFromDataColumnSidecar(roDataColumn))
	}

	return roSidecars, roDataColumns
}

func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
	sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
	require.NoError(t, err)

	rb, err := blocks.NewROBlock(sb)
	require.NoError(t, err)

	return rb
}

type mockDataColumnsVerifier struct {
	t                  *testing.T
	dataColumnSidecars []blocks.RODataColumn
	validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
}

var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}

func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
	require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)

	verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
	for _, dataColumnSidecar := range m.dataColumnSidecars {
		verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
		verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
	}

	return verifiedDataColumnSidecars, nil
}

func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}

func (m *mockDataColumnsVerifier) ValidFields() error {
	m.validCalled = true
	return nil
}

func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
	return nil
}
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error                             { return nil }
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error                            { return nil }
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error     { return nil }

func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
	return nil
}

func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
	return nil
}

func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error          { return nil }
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error    { return nil }

func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
	m.SidecarInclusionProvenCalled = true
	return nil
}

func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
	m.SidecarKzgProofVerifiedCalled = true
	return nil
}

func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }
@@ -4,33 +4,29 @@ import (
	"bytes"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/pkg/errors"
)

var (
	ErrDuplicateSidecar   = errors.New("duplicate sidecar stashed in AvailabilityStore")
	errIndexOutOfBounds   = errors.New("sidecar.index > MAX_BLOBS_PER_BLOCK")
	errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment")
	errMissingSidecar     = errors.New("no sidecar in cache for block commitment")
)
var errIndexOutOfBounds = errors.New("sidecar.index > MAX_BLOBS_PER_BLOCK")

// cacheKey includes the slot so that we can easily iterate through the cache and compare
// slots for eviction purposes. Whether the input is the block or the sidecar, we always have
// the root+slot when interacting with the cache, so it isn't an inconvenience to use both.
type cacheKey struct {
	slot primitives.Slot
	root [32]byte
	root [fieldparams.RootLength]byte
}
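// Minimal sketch (hypothetical helper): both the blob and the data column paths build the same
// kind of key from a sidecar's slot and block root, mirroring what Persist does in this package.
func exampleKeyFromBlob(sc blocks.ROBlob) cacheKey {
	return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
}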
type cache struct {
	entries map[cacheKey]*cacheEntry
type blobCache struct {
	entries map[cacheKey]*blobCacheEntry
}

func newCache() *cache {
	return &cache{entries: make(map[cacheKey]*cacheEntry)}
func newBlobCache() *blobCache {
	return &blobCache{entries: make(map[cacheKey]*blobCacheEntry)}
}

// keyFromSidecar is a convenience method for constructing a cacheKey from a BlobSidecar value.
@@ -44,34 +40,34 @@ func keyFromBlock(b blocks.ROBlock) cacheKey {
}

// ensure returns the entry for the given key, creating it if it isn't already present.
func (c *cache) ensure(key cacheKey) *cacheEntry {
func (c *blobCache) ensure(key cacheKey) *blobCacheEntry {
	e, ok := c.entries[key]
	if !ok {
		e = &cacheEntry{}
		e = &blobCacheEntry{}
		c.entries[key] = e
	}
	return e
}

// delete removes the cache entry from the cache.
func (c *cache) delete(key cacheKey) {
func (c *blobCache) delete(key cacheKey) {
	delete(c.entries, key)
}

// cacheEntry holds a fixed-length cache of BlobSidecars.
type cacheEntry struct {
// blobCacheEntry holds a fixed-length cache of BlobSidecars.
type blobCacheEntry struct {
	scs         []*blocks.ROBlob
	diskSummary filesystem.BlobStorageSummary
}

func (e *cacheEntry) setDiskSummary(sum filesystem.BlobStorageSummary) {
func (e *blobCacheEntry) setDiskSummary(sum filesystem.BlobStorageSummary) {
	e.diskSummary = sum
}

// stash adds an item to the in-memory cache of BlobSidecars.
// Only the first BlobSidecar of a given Index will be kept in the cache.
// stash will return an error if the given blob is already in the cache, or if the Index is out of bounds.
func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
func (e *blobCacheEntry) stash(sc *blocks.ROBlob) error {
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(sc.Slot())
	if sc.Index >= uint64(maxBlobsPerBlock) {
		return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.Index)
@@ -92,7 +88,7 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
// commitments were found in the cache and the sidecar slice return value can be used
// to perform a DA check against the cached sidecars.
// filter only returns blobs that need to be checked. Blobs already available on disk will be excluded.
func (e *cacheEntry) filter(root [32]byte, kc [][]byte, slot primitives.Slot) ([]blocks.ROBlob, error) {
func (e *blobCacheEntry) filter(root [32]byte, kc [][]byte, slot primitives.Slot) ([]blocks.ROBlob, error) {
	count := len(kc)
	if e.diskSummary.AllAvailable(count) {
		return nil, nil
@@ -14,7 +14,7 @@ import (
)

func TestCacheEnsureDelete(t *testing.T) {
	c := newCache()
	c := newBlobCache()
	require.Equal(t, 0, len(c.entries))
	root := bytesutil.ToBytes32([]byte("root"))
	slot := primitives.Slot(1234)
@@ -25,18 +25,18 @@ func TestCacheEnsureDelete(t *testing.T) {

	c.delete(k)
	require.Equal(t, 0, len(c.entries))
	var nilEntry *cacheEntry
	var nilEntry *blobCacheEntry
	require.Equal(t, nilEntry, c.entries[k])
}

type filterTestCaseSetupFunc func(t *testing.T) (*cacheEntry, [][]byte, []blocks.ROBlob)
type filterTestCaseSetupFunc func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob)

func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpected int) filterTestCaseSetupFunc {
	return func(t *testing.T) (*cacheEntry, [][]byte, []blocks.ROBlob) {
	return func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob) {
		blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, nBlobs)
		commits, err := commitmentsToCheck(blk, blk.Block().Slot())
		require.NoError(t, err)
		entry := &cacheEntry{}
		entry := &blobCacheEntry{}
		if len(onDisk) > 0 {
			od := map[[32]byte][]int{blk.Root(): onDisk}
			sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
@@ -125,12 +125,12 @@ func TestFilter(t *testing.T) {
	require.NoError(t, err)
	cases := []struct {
		name  string
		setup func(t *testing.T) (*cacheEntry, [][]byte, []blocks.ROBlob)
		setup func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob)
		err   error
	}{
		{
			name: "commitments mismatch - extra sidecar",
			setup: func(t *testing.T) (*cacheEntry, [][]byte, []blocks.ROBlob) {
			setup: func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob) {
				entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
				commits[5] = nil
				return entry, commits, expected
@@ -139,7 +139,7 @@ func TestFilter(t *testing.T) {
		},
		{
			name: "sidecar missing",
			setup: func(t *testing.T) (*cacheEntry, [][]byte, []blocks.ROBlob) {
			setup: func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob) {
				entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
				entry.scs[5] = nil
				return entry, commits, expected
@@ -148,7 +148,7 @@ func TestFilter(t *testing.T) {
		},
		{
			name: "commitments mismatch - different bytes",
			setup: func(t *testing.T) (*cacheEntry, [][]byte, []blocks.ROBlob) {
			setup: func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob) {
				entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
				entry.scs[5].KzgCommitment = []byte("nope")
				return entry, commits, expected
beacon-chain/das/data_column_cache.go (new file, 131 lines)
@@ -0,0 +1,131 @@
package das

import (
	"bytes"
	"slices"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/pkg/errors"
)

var (
	ErrDuplicateSidecar   = errors.New("duplicate sidecar stashed in AvailabilityStore")
	errColumnIndexTooHigh = errors.New("column index too high")
	errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment")
	errMissingSidecar     = errors.New("no sidecar in cache for block commitment")
)

type dataColumnCache struct {
	entries map[cacheKey]*dataColumnCacheEntry
}

func newDataColumnCache() *dataColumnCache {
	return &dataColumnCache{entries: make(map[cacheKey]*dataColumnCacheEntry)}
}

// ensure returns the entry for the given key, creating it if it isn't already present.
func (c *dataColumnCache) ensure(key cacheKey) *dataColumnCacheEntry {
	entry, ok := c.entries[key]
	if !ok {
		entry = &dataColumnCacheEntry{}
		c.entries[key] = entry
	}

	return entry
}

// delete removes the cache entry from the cache.
func (c *dataColumnCache) delete(key cacheKey) {
	delete(c.entries, key)
}

// dataColumnCacheEntry holds a fixed-length cache of DataColumnSidecars.
type dataColumnCacheEntry struct {
	scs         [fieldparams.NumberOfColumns]*blocks.RODataColumn
	diskSummary filesystem.DataColumnStorageSummary
}

func (e *dataColumnCacheEntry) setDiskSummary(sum filesystem.DataColumnStorageSummary) {
	e.diskSummary = sum
}

// stash adds an item to the in-memory cache of DataColumnSidecars.
// Only the first DataColumnSidecar of a given Index will be kept in the cache.
// stash will return an error if the given data column is already in the cache, or if the Index is out of bounds.
func (e *dataColumnCacheEntry) stash(sc *blocks.RODataColumn) error {
	if sc.Index >= fieldparams.NumberOfColumns {
		return errors.Wrapf(errColumnIndexTooHigh, "index=%d", sc.Index)
	}

	if e.scs[sc.Index] != nil {
		return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitments)
	}

	e.scs[sc.Index] = sc

	return nil
}
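// Minimal sketch of the stash semantics above (hypothetical helper): the first sidecar for an
// index is kept, a second stash for the same index fails with ErrDuplicateSidecar, and indices at
// or above fieldparams.NumberOfColumns are rejected with errColumnIndexTooHigh.
func exampleStashTwice(entry *dataColumnCacheEntry, sc *blocks.RODataColumn) error {
	if err := entry.stash(sc); err != nil {
		return err // e.g. errColumnIndexTooHigh
	}
	return entry.stash(sc) // always ErrDuplicateSidecar for the same index
}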
func (e *dataColumnCacheEntry) filter(root [32]byte, commitmentsArray *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
	nonEmptyIndices := commitmentsArray.nonEmptyIndices()
	if e.diskSummary.AllAvailable(nonEmptyIndices) {
		return nil, nil
	}

	commitmentsCount := commitmentsArray.count()
	sidecars := make([]blocks.RODataColumn, 0, commitmentsCount)

	for i := range nonEmptyIndices {
		if e.diskSummary.HasIndex(i) {
			continue
		}

		if e.scs[i] == nil {
			return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
		}

		if !sliceBytesEqual(commitmentsArray[i], e.scs[i].KzgCommitments) {
			return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitments, commitmentsArray[i])
		}

		sidecars = append(sidecars, *e.scs[i])
	}

	return sidecars, nil
}

// safeCommitmentsArray is a fixed size array of commitments.
// This is helpful for avoiding gratuitous bounds checks.
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte

// count returns the number of commitments in the array.
func (s *safeCommitmentsArray) count() int {
	count := 0

	for i := range s {
		if s[i] != nil {
			count++
		}
	}

	return count
}

// nonEmptyIndices returns a map of indices that are non-nil in the array.
func (s *safeCommitmentsArray) nonEmptyIndices() map[uint64]bool {
	columns := make(map[uint64]bool)

	for i := range s {
		if s[i] != nil {
			columns[uint64(i)] = true
		}
	}

	return columns
}

func sliceBytesEqual(a, b [][]byte) bool {
	return slices.EqualFunc(a, b, bytes.Equal)
}
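// Illustrative sketch (hypothetical helper): how the helpers above behave on a sparse array.
func exampleSafeCommitmentsArray() {
	var array safeCommitmentsArray
	array[1] = [][]byte{{0xaa}}
	array[3] = [][]byte{{0xbb}}

	_ = array.count()                               // 2
	_ = array.nonEmptyIndices()                     // map[uint64]bool{1: true, 3: true}
	_ = sliceBytesEqual(array[1], [][]byte{{0xaa}}) // true
}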
beacon-chain/das/data_column_cache_test.go (new file, 136 lines)
@@ -0,0 +1,136 @@
package das

import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestEnsureDeleteSetDiskSummary(t *testing.T) {
	c := newDataColumnCache()
	key := cacheKey{}
	entry := c.ensure(key)
	require.DeepEqual(t, dataColumnCacheEntry{}, *entry)

	diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true})
	entry.setDiskSummary(diskSummary)
	entry = c.ensure(key)
	require.DeepEqual(t, dataColumnCacheEntry{diskSummary: diskSummary}, *entry)

	c.delete(key)
	entry = c.ensure(key)
	require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
}

func TestStash(t *testing.T) {
	t.Run("Index too high", func(t *testing.T) {
		roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 10_000}})

		var entry dataColumnCacheEntry
		err := entry.stash(&roDataColumns[0])
		require.NotNil(t, err)
	})

	t.Run("Nominal and already existing", func(t *testing.T) {
		roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})

		var entry dataColumnCacheEntry
		err := entry.stash(&roDataColumns[0])
		require.NoError(t, err)

		require.DeepEqual(t, roDataColumns[0], entry.scs[1])

		err = entry.stash(&roDataColumns[0])
		require.NotNil(t, err)
	})
}

func TestFilterDataColumns(t *testing.T) {
	t.Run("All available", func(t *testing.T) {
		commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}

		diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true, false, true})

		dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}

		actual, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
		require.NoError(t, err)
		require.IsNil(t, actual)
	})

	t.Run("Some scs missing", func(t *testing.T) {
		commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}

		diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{})

		dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}

		_, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
		require.NotNil(t, err)
	})

	t.Run("Commitments not equal", func(t *testing.T) {
		commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}

		roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})

		var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
		scs[1] = &roDataColumns[0]

		dataColumnCacheEntry := dataColumnCacheEntry{scs: scs}

		_, err := dataColumnCacheEntry.filter(roDataColumns[0].BlockRoot(), &commitmentsArray)
		require.NotNil(t, err)
	})

	t.Run("Nominal", func(t *testing.T) {
		commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
		diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true})
		expected, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 3, KzgCommitments: [][]byte{[]byte{3}}}})

		var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
		scs[3] = &expected[0]

		dataColumnCacheEntry := dataColumnCacheEntry{scs: scs, diskSummary: diskSummary}

		actual, err := dataColumnCacheEntry.filter(expected[0].BlockRoot(), &commitmentsArray)
		require.NoError(t, err)

		require.DeepEqual(t, expected, actual)
	})
}

func TestCount(t *testing.T) {
	s := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
	require.Equal(t, 2, s.count())
}

func TestNonEmptyIndices(t *testing.T) {
	s := safeCommitmentsArray{nil, [][]byte{[]byte{10}}, nil, [][]byte{[]byte{20}}}
	actual := s.nonEmptyIndices()
	require.DeepEqual(t, map[uint64]bool{1: true, 3: true}, actual)
}

func TestSliceBytesEqual(t *testing.T) {
	t.Run("Different lengths", func(t *testing.T) {
		a := [][]byte{[]byte{1, 2, 3}}
		b := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
		require.Equal(t, false, sliceBytesEqual(a, b))
	})

	t.Run("Same length but different content", func(t *testing.T) {
		a := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
		b := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 7}}
		require.Equal(t, false, sliceBytesEqual(a, b))
	})

	t.Run("Equal slices", func(t *testing.T) {
		a := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
		b := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
		require.Equal(t, true, sliceBytesEqual(a, b))
	})
}
@@ -15,5 +15,5 @@ import (
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
	IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
	Persist(current primitives.Slot, sc ...blocks.ROBlob) error
	Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
}
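// Minimal sketch, not part of the change set: with Persist now taking the generic
// blocks.ROSidecar, the same call site can feed either blob or data column sidecars to any
// AvailabilityStore implementation. The function and variable names are hypothetical.
func examplePersistAndCheck(ctx context.Context, store AvailabilityStore, current primitives.Slot, block blocks.ROBlock, sidecars []blocks.ROSidecar) error {
	if err := store.Persist(current, sidecars...); err != nil {
		return err
	}
	return store.IsDataAvailable(ctx, current, block)
}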
@@ -5,6 +5,7 @@ import (

	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	errors "github.com/pkg/errors"
)

// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
@@ -24,9 +25,13 @@ func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current pri
}

// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROSidecar) error {
	blobSidecars, err := blocks.BlobSidecarsFromSidecars(sc)
	if err != nil {
		return errors.Wrap(err, "blob sidecars from sidecars")
	}
	if m.PersistBlobsCallback != nil {
		return m.PersistBlobsCallback(current, sc...)
		return m.PersistBlobsCallback(current, blobSidecars...)
	}
	return nil
}
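// Minimal sketch of how a test might use the mock with the updated signature; the callback still
// receives concrete blob sidecars after conversion. Everything here other than
// MockAvailabilityStore and PersistBlobsCallback is a hypothetical name.
func exampleMockUsage() *MockAvailabilityStore {
	return &MockAvailabilityStore{
		PersistBlobsCallback: func(current primitives.Slot, blobSidecars ...blocks.ROBlob) error {
			// Assert on the converted blob sidecars here.
			return nil
		},
	}
}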
@@ -59,6 +59,7 @@ go_test(
	"//beacon-chain/verification:go_default_library",
	"//config/fieldparams:go_default_library",
	"//config/params:go_default_library",
	"//consensus-types/blocks:go_default_library",
	"//consensus-types/primitives:go_default_library",
	"//encoding/bytesutil:go_default_library",
	"//proto/prysm/v1alpha1:go_default_library",
@@ -8,6 +8,7 @@ import (
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -41,32 +42,18 @@ func TestWarmCache(t *testing.T) {
|
||||
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{0}: {
|
||||
{Slot: 33, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 1
|
||||
},
|
||||
{1}: {
|
||||
{Slot: 128_002, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{2}: {
|
||||
{Slot: 128_003, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, ColumnIndex: 3, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{3}: {
|
||||
{Slot: 128_034, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4001
|
||||
{Slot: 128_034, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4001
|
||||
},
|
||||
{4}: {
|
||||
{Slot: 131_138, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
},
|
||||
{5}: {
|
||||
{Slot: 131_138, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
},
|
||||
{6}: {
|
||||
{Slot: 131_168, ColumnIndex: 0, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 33, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 1
|
||||
{Slot: 128_002, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, Index: 1, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, Index: 3, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_034, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4001
|
||||
{Slot: 128_034, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4001
|
||||
{Slot: 131_138, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_138, Index: 1, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_168, Index: 0, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4099
|
||||
},
|
||||
)
|
||||
|
||||
@@ -77,29 +64,25 @@ func TestWarmCache(t *testing.T) {
|
||||
|
||||
storage.WarmCache()
|
||||
require.Equal(t, primitives.Epoch(4_000), storage.cache.lowestCachedEpoch)
|
||||
require.Equal(t, 6, len(storage.cache.cache))
|
||||
require.Equal(t, 5, len(storage.cache.cache))
|
||||
|
||||
summary, ok := storage.cache.get([fieldparams.RootLength]byte{1})
|
||||
summary, ok := storage.cache.get(verifiedRoDataColumnSidecars[2].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_000, mask: [fieldparams.NumberOfColumns]bool{false, false, true, false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{2})
|
||||
summary, ok = storage.cache.get(verifiedRoDataColumnSidecars[4].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_000, mask: [fieldparams.NumberOfColumns]bool{false, true, false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{3})
|
||||
summary, ok = storage.cache.get(verifiedRoDataColumnSidecars[6].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_001, mask: [fieldparams.NumberOfColumns]bool{false, false, true, false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{4})
|
||||
summary, ok = storage.cache.get(verifiedRoDataColumnSidecars[8].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, false, true}}, summary)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, true, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{5})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{6})
|
||||
summary, ok = storage.cache.get(verifiedRoDataColumnSidecars[10].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_099, mask: [fieldparams.NumberOfColumns]bool{true}}, summary)
|
||||
}
|
||||
@@ -113,9 +96,7 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{}: {{ColumnIndex: 12}, {ColumnIndex: 1_000_000}, {ColumnIndex: 48}},
|
||||
},
|
||||
[]util.DataColumnParam{{Index: 12}, {Index: 1_000_000}, {Index: 48}},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
@@ -126,7 +107,7 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("one of the column index is too large", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{{}: {{ColumnIndex: 12}, {ColumnIndex: 1_000_000}, {ColumnIndex: 48}}},
|
||||
[]util.DataColumnParam{{Index: 12}, {Index: 1_000_000}, {Index: 48}},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
@@ -137,23 +118,34 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("different slots", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{}: {
|
||||
{Slot: 1, ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{Slot: 2, ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 2, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
},
|
||||
)
|
||||
|
||||
// Create a sidecar with a different slot but the same root.
|
||||
alteredVerifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, 2)
|
||||
alteredVerifiedRoDataColumnSidecars = append(alteredVerifiedRoDataColumnSidecars, verifiedRoDataColumnSidecars[0])
|
||||
|
||||
altered, err := blocks.NewRODataColumnWithRoot(
|
||||
verifiedRoDataColumnSidecars[1].RODataColumn.DataColumnSidecar,
|
||||
verifiedRoDataColumnSidecars[0].BlockRoot(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedAltered := blocks.NewVerifiedRODataColumn(altered)
|
||||
alteredVerifiedRoDataColumnSidecars = append(alteredVerifiedRoDataColumnSidecars, verifiedAltered)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
err = dataColumnStorage.Save(alteredVerifiedRoDataColumnSidecars)
|
||||
require.ErrorIs(t, err, errDataColumnSidecarsFromDifferentSlots)
|
||||
})
|
||||
|
||||
t.Run("new file - no data columns to save", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{{}: {}},
|
||||
[]util.DataColumnParam{},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
@@ -164,11 +156,9 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("new file - different data column size", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{1, 2, 3, 4}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{1}, {2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -180,7 +170,9 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("existing file - wrong incoming SSZ encoded size", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}}},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
},
|
||||
)
|
||||
|
||||
// Save data columns into a file.
|
||||
@@ -192,7 +184,9 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
// column index and a different SSZ encoded size.
|
||||
_, verifiedRoDataColumnSidecars = util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 13, DataColumn: []byte{1, 2, 3, 4}}}},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{1}, {2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
// Try to rewrite the file.
|
||||
@@ -203,17 +197,13 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
_, inputVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}, // OK if duplicate
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
{2}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 11, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}}, // OK if duplicate
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
{Slot: 2, Index: 12, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 2, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -223,16 +213,12 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
|
||||
_, inputVerifiedRoDataColumnSidecars = util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}, // OK if duplicate
|
||||
{ColumnIndex: 15, DataColumn: []byte{2, 3, 4}},
|
||||
{ColumnIndex: 1, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
{3}: {
|
||||
{ColumnIndex: 6, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 2, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}}, // OK if duplicate
|
||||
{Slot: 1, Index: 15, Column: [][]byte{{2}, {3}, {4}}},
|
||||
{Slot: 1, Index: 1, Column: [][]byte{{2}, {3}, {4}}},
|
||||
{Slot: 3, Index: 6, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 3, Index: 2, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -241,51 +227,47 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
|
||||
type fixture struct {
|
||||
fileName string
|
||||
blockRoot [fieldparams.RootLength]byte
|
||||
expectedIndices [mandatoryNumberOfColumns]byte
|
||||
dataColumnParams []util.DataColumnParams
|
||||
dataColumnParams []util.DataColumnParam
|
||||
}
|
||||
|
||||
fixtures := []fixture{
|
||||
{
|
||||
fileName: "0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{1},
|
||||
fileName: "0/0/0x8bb2f09de48c102635622dc27e6de03ae2b22639df7c33edbc8222b2ec423746.sszs",
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, nonZeroOffset + 4, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, nonZeroOffset + 1, nonZeroOffset, nonZeroOffset + 2, 0, nonZeroOffset + 3,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []util.DataColumnParams{
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
{ColumnIndex: 15, DataColumn: []byte{2, 3, 4}},
|
||||
{ColumnIndex: 1, DataColumn: []byte{2, 3, 4}},
|
||||
dataColumnParams: []util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 11, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
{Slot: 1, Index: 15, Column: [][]byte{{2}, {3}, {4}}},
|
||||
{Slot: 1, Index: 1, Column: [][]byte{{2}, {3}, {4}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
fileName: "0/0/0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{2},
|
||||
fileName: "0/0/0x221f88cae2219050d4e9d8c2d0d83cb4c8ce4c84ab1bb3e0b89f3dec36077c4f.sszs",
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, nonZeroOffset, nonZeroOffset + 1, 0, 0,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []util.DataColumnParams{
|
||||
{ColumnIndex: 12, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
dataColumnParams: []util.DataColumnParam{
|
||||
{Slot: 2, Index: 12, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 2, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
fileName: "0/0/0x0300000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{3},
|
||||
fileName: "0/0/0x7b163bd57e1c4c8b5048c5389698098f4c957d62d7ce86f4ffa9bdc75c16a18b.sszs",
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, 0, nonZeroOffset + 1, 0, 0, 0, nonZeroOffset, 0,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []util.DataColumnParams{
|
||||
{ColumnIndex: 6, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 2, DataColumn: []byte{6, 7, 8}},
|
||||
dataColumnParams: []util.DataColumnParam{
|
||||
{Slot: 3, Index: 6, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 3, Index: 2, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
},
|
||||
}
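The fixture file names above are now derived from the sidecar's actual block root rather than a hand-written root, laid out as <period>/<epoch>/0x<blockRoot>.sszs. A minimal sketch of that path derivation, assuming 32 slots per epoch and 4096 epochs per storage period (constants inferred from the directory numbers in these tests; the helper name is hypothetical):

package main

import "fmt"

const (
	slotsPerEpoch   = 32   // assumed mainnet value
	epochsPerPeriod = 4096 // assumed; matches the <period>/<epoch> directories in the fixtures
)

// dataColumnFilePath is a hypothetical helper mirroring the layout used by the fixtures.
func dataColumnFilePath(slot uint64, blockRoot [32]byte) string {
	epoch := slot / slotsPerEpoch
	period := epoch / epochsPerPeriod
	return fmt.Sprintf("%d/%d/%#x.sszs", period, epoch, blockRoot[:])
}

func main() {
	// Slot 1 is in epoch 0, period 0, so the file lands under "0/0/".
	fmt.Println(dataColumnFilePath(1, [32]byte{0x8b, 0xb2}))
}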
|
||||
@@ -294,7 +276,7 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
// Build expected data column sidecars.
|
||||
_, expectedDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{fixture.blockRoot: fixture.dataColumnParams},
|
||||
fixture.dataColumnParams,
|
||||
)
// Build expected bytes.
|
||||
@@ -321,6 +303,8 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
expectedBytes = append(expectedBytes, fixture.expectedIndices[:]...)
|
||||
expectedBytes = append(expectedBytes, sszEncodedDataColumnSidecars...)
|
||||
|
||||
blockRoot := expectedDataColumnSidecars[0].BlockRoot()
|
||||
|
||||
// Check the actual content of the file.
|
||||
actualBytes, err := afero.ReadFile(dataColumnStorage.fs, fixture.fileName)
|
||||
require.NoError(t, err)
|
||||
@@ -329,18 +313,18 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
// Check the summary.
|
||||
indices := map[uint64]bool{}
|
||||
for _, dataColumnParam := range fixture.dataColumnParams {
|
||||
indices[dataColumnParam.ColumnIndex] = true
|
||||
indices[dataColumnParam.Index] = true
|
||||
}
|
||||
|
||||
summary := dataColumnStorage.Summary(fixture.blockRoot)
|
||||
summary := dataColumnStorage.Summary(blockRoot)
|
||||
for index := range uint64(mandatoryNumberOfColumns) {
|
||||
require.Equal(t, indices[index], summary.HasIndex(index))
|
||||
}
|
||||
|
||||
err = dataColumnStorage.Remove(fixture.blockRoot)
|
||||
err = dataColumnStorage.Remove(blockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
summary = dataColumnStorage.Summary(fixture.blockRoot)
|
||||
summary = dataColumnStorage.Summary(blockRoot)
|
||||
for index := range uint64(mandatoryNumberOfColumns) {
|
||||
require.Equal(t, false, summary.HasIndex(index))
|
||||
}
|
||||
@@ -363,11 +347,9 @@ func TestGetDataColumnSidecars(t *testing.T) {
|
||||
t.Run("indices not found", func(t *testing.T) {
|
||||
_, savedVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 14, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Index: 14, Column: [][]byte{{2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -375,7 +357,7 @@ func TestGetDataColumnSidecars(t *testing.T) {
|
||||
err := dataColumnStorage.Save(savedVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{3, 1, 2})
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get(savedVerifiedRoDataColumnSidecars[0].BlockRoot(), []uint64{3, 1, 2})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(verifiedRODataColumnSidecars))
|
||||
})
|
||||
@@ -383,11 +365,9 @@ func TestGetDataColumnSidecars(t *testing.T) {
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
_, expectedVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 14, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Index: 14, Column: [][]byte{{2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -395,11 +375,13 @@ func TestGetDataColumnSidecars(t *testing.T) {
|
||||
err := dataColumnStorage.Save(expectedVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, nil)
|
||||
root := expectedVerifiedRoDataColumnSidecars[0].BlockRoot()
|
||||
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get(root, nil)
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)
|
||||
|
||||
verifiedRODataColumnSidecars, err = dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{12, 13, 14})
|
||||
verifiedRODataColumnSidecars, err = dataColumnStorage.Get(root, []uint64{12, 13, 14})
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)
|
||||
})
|
||||
@@ -415,15 +397,11 @@ func TestRemove(t *testing.T) {
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
_, inputVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{Slot: 32, ColumnIndex: 10, DataColumn: []byte{1, 2, 3}},
|
||||
{Slot: 32, ColumnIndex: 11, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
{2}: {
|
||||
{Slot: 33, ColumnIndex: 10, DataColumn: []byte{1, 2, 3}},
|
||||
{Slot: 33, ColumnIndex: 11, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 32, Index: 10, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 32, Index: 11, Column: [][]byte{{2}, {3}, {4}}},
|
||||
{Slot: 33, Index: 10, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 33, Index: 11, Column: [][]byte{{2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -431,22 +409,22 @@ func TestRemove(t *testing.T) {
|
||||
err := dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = dataColumnStorage.Remove([fieldparams.RootLength]byte{1})
|
||||
err = dataColumnStorage.Remove(inputVerifiedRoDataColumnSidecars[0].BlockRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
summary := dataColumnStorage.Summary([fieldparams.RootLength]byte{1})
|
||||
summary := dataColumnStorage.Summary(inputVerifiedRoDataColumnSidecars[0].BlockRoot())
|
||||
require.Equal(t, primitives.Epoch(0), summary.epoch)
|
||||
require.Equal(t, uint64(0), summary.Count())
|
||||
|
||||
summary = dataColumnStorage.Summary([fieldparams.RootLength]byte{2})
|
||||
summary = dataColumnStorage.Summary(inputVerifiedRoDataColumnSidecars[3].BlockRoot())
|
||||
require.Equal(t, primitives.Epoch(1), summary.epoch)
|
||||
require.Equal(t, uint64(2), summary.Count())
|
||||
|
||||
actual, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, nil)
|
||||
actual, err := dataColumnStorage.Get(inputVerifiedRoDataColumnSidecars[0].BlockRoot(), nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(actual))
|
||||
|
||||
actual, err = dataColumnStorage.Get([fieldparams.RootLength]byte{2}, nil)
|
||||
actual, err = dataColumnStorage.Get(inputVerifiedRoDataColumnSidecars[3].BlockRoot(), nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(actual))
|
||||
})
|
||||
@@ -455,9 +433,9 @@ func TestRemove(t *testing.T) {
|
||||
func TestClear(t *testing.T) {
|
||||
_, inputVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}},
|
||||
{2}: {{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}}},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 2, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -466,8 +444,8 @@ func TestClear(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
filePaths := []string{
|
||||
"0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0/0/0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0/0/0x8bb2f09de48c102635622dc27e6de03ae2b22639df7c33edbc8222b2ec423746.sszs",
|
||||
"0/0/0x221f88cae2219050d4e9d8c2d0d83cb4c8ce4c84ab1bb3e0b89f3dec36077c4f.sszs",
|
||||
}
|
||||
|
||||
for _, filePath := range filePaths {
|
||||
@@ -493,8 +471,8 @@ func TestMetadata(t *testing.T) {
|
||||
t.Run("wrong version", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -504,7 +482,7 @@ func TestMetadata(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Alter the version.
|
||||
const filePath = "0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs"
|
||||
const filePath = "0/0/0x8bb2f09de48c102635622dc27e6de03ae2b22639df7c33edbc8222b2ec423746.sszs"
|
||||
file, err := dataColumnStorage.fs.OpenFile(filePath, os.O_WRONLY, os.FileMode(0600))
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -644,31 +622,19 @@ func TestPrune(t *testing.T) {
|
||||
}
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{0}: {
|
||||
{Slot: 33, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 1
|
||||
},
|
||||
{1}: {
|
||||
{Slot: 128_002, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{2}: {
|
||||
{Slot: 128_003, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, ColumnIndex: 3, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{3}: {
|
||||
{Slot: 131_138, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_138, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
},
|
||||
{4}: {
|
||||
{Slot: 131_169, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
|
||||
{Slot: 131_169, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
|
||||
},
|
||||
{5}: {
|
||||
{Slot: 262_144, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 2 - Epoch 8192
|
||||
{Slot: 262_144, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 2 - Epoch 8192
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 33, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 1
|
||||
{Slot: 128_002, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, Index: 1, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, Index: 3, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 131_138, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_138, Index: 3, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_169, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4099
|
||||
{Slot: 131_169, Index: 3, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4099
|
||||
{Slot: 262_144, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 2 - Epoch 8192
|
||||
{Slot: 262_144, Index: 3, Column: [][]byte{{1}, {2}, {3}}}, // Period 2 - Epoch 8192
|
||||
},
|
||||
)
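The Period/Epoch annotations above follow directly from the slots, assuming 32 slots per epoch and 4096 epochs per pruning period (the same assumption behind the <period>/<epoch> directory names checked below); a quick check:

package main

import "fmt"

func main() {
	const (
		slotsPerEpoch   = 32   // assumed
		epochsPerPeriod = 4096 // assumed, inferred from the Period annotations
	)
	for _, slot := range []uint64{33, 128_002, 131_138, 131_169, 262_144, 451_141} {
		epoch := slot / slotsPerEpoch
		period := epoch / epochsPerPeriod
		// Prints epoch 1/period 0, 4000/0, 4098/1, 4099/1, 8192/2 and 14098/3 respectively.
		fmt.Printf("slot %d -> epoch %d, period %d\n", slot, epoch, period)
	}
}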
|
||||
|
||||
@@ -697,31 +663,31 @@ func TestPrune(t *testing.T) {
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "0/1")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0000000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x775283f428813c949b7e8af07f01fef9790137f021b3597ad2d0d81e8be8f0f0.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "0/4000")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{
|
||||
"0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0x9977031132157ebb9c81bce952003ce07a4f54e921ca63b7693d1562483fdf9f.sszs",
|
||||
"0xb2b14d9d858fa99b70f0405e4e39f38e51e36dd9a70343c109e24eeb5f77e369.sszs",
|
||||
}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1/4098")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0300000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x5106745cdd6b1aa3602ef4d000ef373af672019264c167fa4bd641a1094aa5c5.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1/4099")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0400000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x4e5f2bd5bb84bf0422af8edd1cc5a52cc6cea85baf3d66d172fe41831ac1239c.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "2/8192")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0500000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0xa8adba7446eb56a01a9dd6d55e9c3990b10c91d43afb77847b4a21ac4ee62527.sszs"}, dirs))
|
||||
|
||||
_, verifiedRoDataColumnSidecars = util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{6}: {{Slot: 451_141, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}}, // Period 3 - Epoch 14_098
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 451_141, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 3 - Epoch 14_098
|
||||
},
|
||||
)
|
||||
|
||||
@@ -749,14 +715,14 @@ func TestPrune(t *testing.T) {
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1/4099")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0400000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x4e5f2bd5bb84bf0422af8edd1cc5a52cc6cea85baf3d66d172fe41831ac1239c.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "2/8192")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0500000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0xa8adba7446eb56a01a9dd6d55e9c3990b10c91d43afb77847b4a21ac4ee62527.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "3/14098")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0600000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x0de28a18cae63cbc6f0b20dc1afb0b1df38da40824a5f09f92d485ade04de97f.sszs"}, dirs))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
@@ -46,14 +47,21 @@ func NewWarmedEphemeralBlobStorageUsingFs(t testing.TB, fs afero.Fs, opts ...Blo
|
||||
return bs
|
||||
}
|
||||
|
||||
type BlobMocker struct {
|
||||
fs afero.Fs
|
||||
bs *BlobStorage
|
||||
}
|
||||
type (
|
||||
BlobMocker struct {
|
||||
fs afero.Fs
|
||||
bs *BlobStorage
|
||||
}
|
||||
|
||||
DataColumnMocker struct {
|
||||
fs afero.Fs
|
||||
dcs *DataColumnStorage
|
||||
}
|
||||
)
|
||||
|
||||
// CreateFakeIndices creates empty blob sidecar files at the expected path for the given
|
||||
// root and indices to influence the result of Indices().
|
||||
func (bm *BlobMocker) CreateFakeIndices(root [32]byte, slot primitives.Slot, indices ...uint64) error {
|
||||
func (bm *BlobMocker) CreateFakeIndices(root [fieldparams.RootLength]byte, slot primitives.Slot, indices ...uint64) error {
|
||||
for i := range indices {
|
||||
if err := bm.bs.layout.notify(newBlobIdent(root, slots.ToEpoch(slot), indices[i])); err != nil {
|
||||
return err
|
||||
@@ -62,6 +70,17 @@ func (bm *BlobMocker) CreateFakeIndices(root [32]byte, slot primitives.Slot, ind
|
||||
return nil
|
||||
}
// CreateFakeIndices records fake data column indices in the storage cache for the given
// root and slot, to influence the result of Summary().
func (bm *DataColumnMocker) CreateFakeIndices(root [fieldparams.RootLength]byte, slot primitives.Slot, indices ...uint64) error {
|
||||
err := bm.dcs.cache.set(DataColumnsIdent{Root: root, Epoch: slots.ToEpoch(slot), Indices: indices})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cache set")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
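A hedged usage sketch for the new mocker, written as a test in the same filesystem package and assuming the Summary/HasIndex behaviour exercised by the storage tests above:

package filesystem

import (
	"testing"

	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestDataColumnMockerSketch(t *testing.T) {
	mocker, storage := NewEphemeralDataColumnStorageWithMocker(t)

	// Pretend columns 3 and 7 are stored for this root at slot 64 without writing any files.
	root := [fieldparams.RootLength]byte{1}
	require.NoError(t, mocker.CreateFakeIndices(root, 64, 3, 7))

	summary := storage.Summary(root)
	require.Equal(t, true, summary.HasIndex(3))
	require.Equal(t, true, summary.HasIndex(7))
	require.Equal(t, false, summary.HasIndex(4))
}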
|
||||
|
||||
// NewEphemeralBlobStorageWithMocker returns a *BlobMocker value in addition to the BlobStorage value.
|
||||
// BlobMocker encapsulates blob path construction to avoid leaking implementation details.
|
||||
func NewEphemeralBlobStorageWithMocker(t testing.TB) (*BlobMocker, *BlobStorage) {
|
||||
@@ -119,6 +138,13 @@ func NewEphemeralDataColumnStorageUsingFs(t testing.TB, fs afero.Fs, opts ...Dat
|
||||
return bs
|
||||
}
|
||||
|
||||
// NewEphemeralDataColumnStorageWithMocker returns a *DataColumnMocker value in addition to the DataColumnStorage value.
// DataColumnMocker encapsulates data column path construction to avoid leaking implementation details.
|
||||
func NewEphemeralDataColumnStorageWithMocker(t testing.TB) (*DataColumnMocker, *DataColumnStorage) {
|
||||
fs, dcs := NewEphemeralDataColumnStorageAndFs(t)
|
||||
return &DataColumnMocker{fs: fs, dcs: dcs}, dcs
|
||||
}
|
||||
|
||||
func NewMockDataColumnStorageSummarizer(t *testing.T, set map[[fieldparams.RootLength]byte][]uint64) DataColumnStorageSummarizer {
|
||||
c := newDataColumnStorageSummaryCache()
|
||||
for root, indices := range set {
|
||||
|
||||
@@ -518,7 +518,7 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
|
||||
|
||||
switch {
|
||||
case hasFuluKey(enc):
|
||||
protoState := ðpb.BeaconStateElectra{}
|
||||
protoState := ðpb.BeaconStateFulu{}
|
||||
if err := protoState.UnmarshalSSZ(enc[len(fuluKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal encoding for Fulu")
|
||||
}
|
||||
@@ -690,7 +690,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
|
||||
}
|
||||
return snappy.Encode(nil, append(ElectraKey, rawObj...)), nil
|
||||
case version.Fulu:
|
||||
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateElectra)
|
||||
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateFulu)
|
||||
if !ok {
|
||||
return nil, errors.New("non valid inner state")
|
||||
}
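The point of this hunk is that a Fulu state must round-trip through the Fulu proto type on both the read path (the hasFuluKey branch) and the write path (the version.Fulu case); previously both still used BeaconStateElectra. A minimal sketch of the key-prefix framing the two paths share, with a made-up key and stand-in SSZ bytes:

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

// fuluKey is a stand-in; the real fork key lives in the kv package.
var fuluKey = []byte("fulu")

// marshal frames the raw SSZ bytes with the fork key and snappy-compresses the result,
// mirroring what marshalState does for version.Fulu.
func marshal(ssz []byte) []byte {
	return snappy.Encode(nil, append(append([]byte{}, fuluKey...), ssz...))
}

// unmarshal reverses the framing; the returned bytes are what BeaconStateFulu.UnmarshalSSZ
// would receive (the real unmarshalState is handed already-decompressed bytes).
func unmarshal(enc []byte) ([]byte, error) {
	raw, err := snappy.Decode(nil, enc)
	if err != nil {
		return nil, err
	}
	if !bytes.HasPrefix(raw, fuluKey) {
		return nil, fmt.Errorf("not a Fulu-encoded state")
	}
	return raw[len(fuluKey):], nil
}

func main() {
	out, err := unmarshal(marshal([]byte{1, 2, 3}))
	fmt.Println(out, err) // [1 2 3] <nil>
}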
|
||||
|
||||
@@ -31,6 +31,7 @@ go_library(
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/execution/types:go_default_library",
|
||||
@@ -97,6 +98,7 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
@@ -44,11 +45,16 @@ var (
|
||||
GetPayloadMethodV3,
|
||||
GetPayloadBodiesByHashV1,
|
||||
GetPayloadBodiesByRangeV1,
|
||||
GetBlobsV1,
|
||||
}
|
||||
electraEngineEndpoints = []string{
|
||||
NewPayloadMethodV4,
|
||||
GetPayloadMethodV4,
|
||||
}
|
||||
fuluEngineEndpoints = []string{
|
||||
GetPayloadMethodV5,
|
||||
GetBlobsV2,
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -73,6 +79,8 @@ const (
|
||||
GetPayloadMethodV3 = "engine_getPayloadV3"
|
||||
// GetPayloadMethodV4 is the get payload method added for electra
|
||||
GetPayloadMethodV4 = "engine_getPayloadV4"
|
||||
// GetPayloadMethodV5 is the get payload method added for fulu
|
||||
GetPayloadMethodV5 = "engine_getPayloadV5"
|
||||
// BlockByHashMethod request string for JSON-RPC.
|
||||
BlockByHashMethod = "eth_getBlockByHash"
|
||||
// BlockByNumberMethod request string for JSON-RPC.
|
||||
@@ -85,11 +93,21 @@ const (
|
||||
ExchangeCapabilities = "engine_exchangeCapabilities"
|
||||
// GetBlobsV1 request string for JSON-RPC.
|
||||
GetBlobsV1 = "engine_getBlobsV1"
|
||||
// GetBlobsV2 request string for JSON-RPC.
|
||||
GetBlobsV2 = "engine_getBlobsV2"
|
||||
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
|
||||
defaultEngineTimeout = time.Second
|
||||
// TODO: Remove this temporary hack. Geth accepts blob transactions that carry blob proofs and does the
// heavy lifting of building the cell proofs itself, while normally this is done by the transaction sender.
// This lets the CL act as if the sender had actually computed the cell proofs; the downside is that
// `engine_getPayloadV<x>` takes much longer.
// defaultEngineTimeout = time.Second
defaultEngineTimeout = 2 * time.Second
|
||||
)
|
||||
|
||||
var errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
|
||||
var (
|
||||
errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
|
||||
errMissingBlobsAndProofsFromEL = errors.New("engine api payload body response is missing blobs and proofs")
|
||||
)
|
||||
|
||||
// ForkchoiceUpdatedResponse is the response kind received by the
|
||||
// engine_forkchoiceUpdatedV1 endpoint.
|
||||
@@ -108,6 +126,7 @@ type Reconstructor interface {
|
||||
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
|
||||
) ([]interfaces.SignedBeaconBlock, error)
|
||||
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
|
||||
ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error)
|
||||
}
|
||||
|
||||
// EngineCaller defines a client that can interact with an Ethereum
|
||||
@@ -257,6 +276,9 @@ func (s *Service) ForkchoiceUpdated(
|
||||
|
||||
func getPayloadMethodAndMessage(slot primitives.Slot) (string, proto.Message) {
|
||||
pe := slots.ToEpoch(slot)
|
||||
if pe >= params.BeaconConfig().FuluForkEpoch {
|
||||
return GetPayloadMethodV5, &pb.ExecutionBundleFulu{}
|
||||
}
|
||||
if pe >= params.BeaconConfig().ElectraForkEpoch {
|
||||
return GetPayloadMethodV4, &pb.ExecutionBundleElectra{}
|
||||
}
|
||||
@@ -289,7 +311,7 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
|
||||
}
|
||||
res, err := blocks.NewGetPayloadResponse(result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "new get payload response")
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
@@ -298,33 +320,36 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeCapabilities")
|
||||
defer span.End()
|
||||
|
||||
// Only check for electra related engine methods if it has been activated.
|
||||
if params.ElectraEnabled() {
|
||||
supportedEngineEndpoints = append(supportedEngineEndpoints, electraEngineEndpoints...)
|
||||
}
|
||||
var result []string
|
||||
err := s.rpcClient.CallContext(ctx, &result, ExchangeCapabilities, supportedEngineEndpoints)
|
||||
if err != nil {
|
||||
|
||||
if params.FuluEnabled() {
|
||||
supportedEngineEndpoints = append(supportedEngineEndpoints, fuluEngineEndpoints...)
|
||||
}
|
||||
|
||||
elSupportedEndpointsSlice := make([]string, len(supportedEngineEndpoints))
|
||||
if err := s.rpcClient.CallContext(ctx, &elSupportedEndpointsSlice, ExchangeCapabilities, supportedEngineEndpoints); err != nil {
|
||||
return nil, handleRPCError(err)
|
||||
}
|
||||
|
||||
var unsupported []string
|
||||
for _, s1 := range supportedEngineEndpoints {
|
||||
supported := false
|
||||
for _, s2 := range result {
|
||||
if s1 == s2 {
|
||||
supported = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !supported {
|
||||
unsupported = append(unsupported, s1)
|
||||
elSupportedEndpoints := make(map[string]bool, len(elSupportedEndpointsSlice))
|
||||
for _, method := range elSupportedEndpointsSlice {
|
||||
elSupportedEndpoints[method] = true
|
||||
}
|
||||
|
||||
unsupported := make([]string, 0, len(supportedEngineEndpoints))
|
||||
for _, method := range supportedEngineEndpoints {
|
||||
if !elSupportedEndpoints[method] {
|
||||
unsupported = append(unsupported, method)
|
||||
}
|
||||
}
|
||||
|
||||
if len(unsupported) != 0 {
|
||||
log.Warnf("Please update client, detected the following unsupported engine methods: %s", unsupported)
|
||||
log.WithField("methods", unsupported).Warning("Connected execution client does not support some requested engine methods")
|
||||
}
|
||||
return result, handleRPCError(err)
|
||||
|
||||
return elSupportedEndpointsSlice, nil
|
||||
}
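The rewrite above replaces the quadratic nested loop with a set lookup over the methods reported by the EL; the same set difference in isolation looks like this (the helper name here is illustrative, not the actual code):

package main

import "fmt"

// unsupportedMethods returns the engine methods we advertise that the EL did not report back.
func unsupportedMethods(wanted, reported []string) []string {
	have := make(map[string]bool, len(reported))
	for _, m := range reported {
		have[m] = true
	}
	missing := make([]string, 0, len(wanted))
	for _, m := range wanted {
		if !have[m] {
			missing = append(missing, m)
		}
	}
	return missing
}

func main() {
	wanted := []string{"engine_getPayloadV5", "engine_getBlobsV2"}
	reported := []string{"engine_getPayloadV5"}
	fmt.Println(unsupportedMethods(wanted, reported)) // [engine_getBlobsV2]
}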
// GetTerminalBlockHash returns the valid terminal block hash based on total difficulty.
|
||||
@@ -495,9 +520,10 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
|
||||
func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProof, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobs")
|
||||
defer span.End()
|
||||
|
||||
// If the execution engine does not support `GetBlobsV1`, return an error early.
|
||||
if !s.capabilityCache.has(GetBlobsV1) {
|
||||
return nil, nil
|
||||
return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV1))
|
||||
}
|
||||
|
||||
result := make([]*pb.BlobAndProof, len(versionedHashes))
|
||||
@@ -505,6 +531,19 @@ func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) (
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProofV2, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV2")
|
||||
defer span.End()
|
||||
|
||||
if !s.capabilityCache.has(GetBlobsV2) {
|
||||
return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV2))
|
||||
}
|
||||
|
||||
result := make([]*pb.BlobAndProofV2, len(versionedHashes))
|
||||
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
// ReconstructFullBlock takes in a blinded beacon block and reconstructs
|
||||
// a beacon block with a full execution payload via the engine API.
|
||||
func (s *Service) ReconstructFullBlock(
|
||||
@@ -615,6 +654,73 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
|
||||
return verifiedBlobs, nil
|
||||
}
|
||||
|
||||
// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
|
||||
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
|
||||
// and constructs the corresponding verified read-only data column sidecars.
|
||||
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
|
||||
block := signedROBlock.Block()
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"slot": block.Slot(),
|
||||
})
|
||||
|
||||
kzgCommitments, err := block.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Collect KZG hashes for all blobs
|
||||
var kzgHashes []common.Hash
|
||||
for _, commitment := range kzgCommitments {
|
||||
kzgHashes = append(kzgHashes, primitives.ConvertKzgCommitmentToVersionedHash(commitment))
|
||||
}
|
||||
|
||||
// Fetch all blobsAndCellsProofs from EL
|
||||
blobAndProofV2s, err := s.GetBlobsV2(ctx, kzgHashes)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "get blobs V2")
|
||||
}
|
||||
|
||||
// Return early if nothing is returned from the EL.
|
||||
if len(blobAndProofV2s) == 0 {
|
||||
log.Debug("No blobs returned from EL")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Extract the blobs and proofs from the blobAndProofV2s.
|
||||
blobs := make([][]byte, 0, len(blobAndProofV2s))
|
||||
cellProofs := make([][]byte, 0, len(blobAndProofV2s))
|
||||
for _, blobsAndProofs := range blobAndProofV2s {
|
||||
if blobsAndProofs == nil {
|
||||
return nil, wrapWithBlockRoot(errMissingBlobsAndProofsFromEL, blockRoot, "")
|
||||
}
|
||||
blobs = append(blobs, blobsAndProofs.Blob)
|
||||
cellProofs = append(cellProofs, blobsAndProofs.KzgProofs...)
|
||||
}
|
||||
|
||||
dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(signedROBlock, blobs, cellProofs)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "construct data column sidecars")
|
||||
}
|
||||
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "new read-only data column with root")
|
||||
}
|
||||
|
||||
// We trust the execution layer we are connected to, so we can upgrade the read only data column sidecar into a verified one.
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
|
||||
}
|
||||
|
||||
log.Debug("Data columns successfully reconstructed from EL")
|
||||
|
||||
return verifiedRODataColumns, nil
|
||||
}
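A hedged caller-side sketch of how the new method might be used through the Reconstructor interface, assuming the usual Prysm import paths and the DataColumnStorage.Save method exercised by the filesystem tests; this is an illustrative helper, not Prysm code:

package example

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/pkg/errors"
)

// saveReconstructedColumns asks the EL for the block's blobs and cell proofs, rebuilds the
// data column sidecars, and persists them.
func saveReconstructedColumns(
	ctx context.Context,
	rec execution.Reconstructor,
	store *filesystem.DataColumnStorage,
	block interfaces.ReadOnlySignedBeaconBlock,
	root [32]byte,
) error {
	sidecars, err := rec.ReconstructDataColumnSidecars(ctx, block, root)
	if err != nil {
		return errors.Wrap(err, "reconstruct data column sidecars")
	}
	if len(sidecars) == 0 {
		// The EL had none of the blobs; nothing to store.
		return nil
	}
	return store.Save(sidecars)
}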
func fullPayloadFromPayloadBody(
|
||||
header interfaces.ExecutionData, body *pb.ExecutionPayloadBody, bVersion int,
|
||||
) (interfaces.ExecutionData, error) {
|
||||
@@ -902,3 +1008,8 @@ func toBlockNumArg(number *big.Int) string {
|
||||
}
|
||||
return hexutil.EncodeBig(number)
|
||||
}
|
||||
|
||||
// wrapWithBlockRoot returns a new error with the given block root.
|
||||
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
|
||||
return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
mocks "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
@@ -167,6 +168,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
t.Run(GetPayloadMethod, func(t *testing.T) {
|
||||
@@ -317,11 +319,11 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.DeepEqual(t, uint64(2), g)
|
||||
|
||||
commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundler.GetKzgCommitments())
|
||||
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundler.GetProofs())
|
||||
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundler.GetBlobs())
|
||||
})
|
||||
t.Run(GetPayloadMethodV4, func(t *testing.T) {
|
||||
payloadId := [8]byte{1}
|
||||
@@ -372,11 +374,11 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.DeepEqual(t, uint64(2), g)
|
||||
|
||||
commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundler.GetKzgCommitments())
|
||||
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundler.GetProofs())
|
||||
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundler.GetBlobs())
|
||||
requests := &pb.ExecutionRequests{
|
||||
Deposits: []*pb.DepositRequest{
|
||||
{
|
||||
@@ -405,7 +407,52 @@ func TestClient_HTTP(t *testing.T) {
|
||||
|
||||
require.DeepEqual(t, requests, resp.ExecutionRequests)
|
||||
})
|
||||
t.Run(GetPayloadMethodV5, func(t *testing.T) {
|
||||
payloadId := [8]byte{1}
|
||||
want, ok := fix["ExecutionBundleFulu"].(*pb.GetPayloadV5ResponseJson)
|
||||
require.Equal(t, true, ok)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
|
||||
reqArg, err := json.Marshal(pb.PayloadIDBytes(payloadId))
|
||||
require.NoError(t, err)
|
||||
|
||||
// We expect the JSON string RPC request contains the right arguments.
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(reqArg),
|
||||
))
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": want,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
client := &Service{}
|
||||
client.rpcClient = rpcClient
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := client.GetPayload(ctx, payloadId, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
_, ok = resp.BlobsBundler.(*pb.BlobsBundleV2)
|
||||
if !ok {
|
||||
t.Logf("resp.BlobsBundler has unexpected type: %T", resp.BlobsBundler)
|
||||
}
|
||||
require.Equal(t, ok, true)
|
||||
})
|
||||
t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
|
||||
forkChoiceState := &pb.ForkchoiceState{
|
||||
HeadBlockHash: []byte("head"),
|
||||
@@ -1539,6 +1586,7 @@ func fixtures() map[string]interface{} {
|
||||
"ExecutionPayloadCapellaWithValue": s.ExecutionPayloadWithValueCapella,
|
||||
"ExecutionPayloadDenebWithValue": s.ExecutionPayloadWithValueDeneb,
|
||||
"ExecutionBundleElectra": s.ExecutionBundleElectra,
|
||||
"ExecutionBundleFulu": s.ExecutionBundleFulu,
|
||||
"ValidPayloadStatus": s.ValidPayloadStatus,
|
||||
"InvalidBlockHashStatus": s.InvalidBlockHashStatus,
|
||||
"AcceptedStatus": s.AcceptedStatus,
|
||||
@@ -1774,6 +1822,36 @@ func fixturesStruct() *payloadFixtures {
|
||||
append([]byte{pb.WithdrawalRequestType}, withdrawalRequestBytes...),
|
||||
append([]byte{pb.ConsolidationRequestType}, consolidationRequestBytes...)},
|
||||
}
|
||||
executionBundleFixtureFulu := &pb.GetPayloadV5ResponseJson{
|
||||
ShouldOverrideBuilder: true,
|
||||
ExecutionPayload: &pb.ExecutionPayloadDenebJSON{
|
||||
ParentHash: &common.Hash{'a'},
|
||||
FeeRecipient: &common.Address{'b'},
|
||||
StateRoot: &common.Hash{'c'},
|
||||
ReceiptsRoot: &common.Hash{'d'},
|
||||
LogsBloom: &hexutil.Bytes{'e'},
|
||||
PrevRandao: &common.Hash{'f'},
|
||||
BaseFeePerGas: "0x123",
|
||||
BlockHash: &common.Hash{'g'},
|
||||
Transactions: []hexutil.Bytes{{'h'}},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
BlockNumber: &hexUint,
|
||||
GasLimit: &hexUint,
|
||||
GasUsed: &hexUint,
|
||||
Timestamp: &hexUint,
|
||||
BlobGasUsed: &bgu,
|
||||
ExcessBlobGas: &ebg,
|
||||
},
|
||||
BlockValue: "0x11fffffffff",
|
||||
BlobsBundle: &pb.BlobBundleV2JSON{
|
||||
Commitments: []hexutil.Bytes{[]byte("commitment1"), []byte("commitment2")},
|
||||
Proofs: []hexutil.Bytes{[]byte("proof1"), []byte("proof2")},
|
||||
Blobs: []hexutil.Bytes{{'a'}, {'b'}},
|
||||
},
|
||||
ExecutionRequests: []hexutil.Bytes{append([]byte{pb.DepositRequestType}, depositRequestBytes...),
|
||||
append([]byte{pb.WithdrawalRequestType}, withdrawalRequestBytes...),
|
||||
append([]byte{pb.ConsolidationRequestType}, consolidationRequestBytes...)},
|
||||
}
|
||||
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
|
||||
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
|
||||
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
|
||||
@@ -1868,6 +1946,7 @@ func fixturesStruct() *payloadFixtures {
|
||||
ExecutionPayloadWithValueCapella: executionPayloadWithValueFixtureCapella,
|
||||
ExecutionPayloadWithValueDeneb: executionPayloadWithValueFixtureDeneb,
|
||||
ExecutionBundleElectra: executionBundleFixtureElectra,
|
||||
ExecutionBundleFulu: executionBundleFixtureFulu,
|
||||
ValidPayloadStatus: validStatus,
|
||||
InvalidBlockHashStatus: inValidBlockHashStatus,
|
||||
AcceptedStatus: acceptedStatus,
|
||||
@@ -1892,6 +1971,7 @@ type payloadFixtures struct {
|
||||
ExecutionPayloadWithValueCapella *pb.GetPayloadV2ResponseJson
|
||||
ExecutionPayloadWithValueDeneb *pb.GetPayloadV3ResponseJson
|
||||
ExecutionBundleElectra *pb.GetPayloadV4ResponseJson
|
||||
ExecutionBundleFulu *pb.GetPayloadV5ResponseJson
|
||||
ValidPayloadStatus *pb.PayloadStatus
|
||||
InvalidBlockHashStatus *pb.PayloadStatus
|
||||
AcceptedStatus *pb.PayloadStatus
|
||||
@@ -2361,7 +2441,7 @@ func Test_ExchangeCapabilities(t *testing.T) {
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
assert.LogsContain(t, logHook, "Please update client, detected the following unsupported engine methods:")
|
||||
assert.LogsContain(t, logHook, "Connected execution client does not support some requested engine methods")
|
||||
})
|
||||
t.Run("list of items", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -2424,7 +2504,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
|
||||
t.Run("get-blobs end point is not supported", func(t *testing.T) {
|
||||
hi := mockSummary(t, []bool{true, true, true, true, true, false})
|
||||
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "engine_getBlobsV1 is not supported", err)
|
||||
require.Equal(t, 0, len(verifiedBlobs))
|
||||
})
|
||||
|
||||
@@ -2476,6 +2556,76 @@ func TestReconstructBlobSidecars(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the right fork epochs.
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
client := &Service{capabilityCache: &capabilityCache{}}
|
||||
b := util.NewBeaconBlockFulu()
|
||||
b.Block.Slot = 4 * params.BeaconConfig().SlotsPerEpoch
|
||||
kzgCommitments := createRandomKzgCommitments(t, 6)
|
||||
b.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
|
||||
_, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.ErrorContains(t, "get blobs V2 for block", err)
|
||||
})
|
||||
|
||||
t.Run("nothing received", func(t *testing.T) {
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
})
|
||||
|
||||
t.Run("receiving all blobs", func(t *testing.T) {
|
||||
blobMasks := []bool{true, true, true, true, true, true}
|
||||
srv := createBlobServerV2(t, 6, blobMasks)
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 128, len(dataColumns))
|
||||
})
|
||||
|
||||
t.Run("missing some blobs", func(t *testing.T) {
|
||||
blobMasks := []bool{false, true, true, true, true, true}
|
||||
srv := createBlobServerV2(t, 6, blobMasks)
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.ErrorContains(t, errMissingBlobsAndProofsFromEL.Error(), err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
})
|
||||
}
|
||||
|
||||
func createRandomKzgCommitments(t *testing.T, num int) [][]byte {
|
||||
kzgCommitments := make([][]byte, num)
|
||||
for i := range kzgCommitments {
|
||||
@@ -2511,6 +2661,42 @@ func createBlobServer(t *testing.T, numBlobs int, callbackFuncs ...func()) *http
|
||||
}))
|
||||
}
|
||||
|
||||
func createBlobServerV2(t *testing.T, numBlobs int, blobMasks []bool) *httptest.Server {
|
||||
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
require.Equal(t, len(blobMasks), numBlobs)
|
||||
|
||||
blobAndCellProofs := make([]*pb.BlobAndProofV2Json, numBlobs)
|
||||
for i := range blobAndCellProofs {
|
||||
if !blobMasks[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
blobAndCellProofs[i] = &pb.BlobAndProofV2Json{
|
||||
Blob: []byte("0xblob"),
|
||||
KzgProofs: []hexutil.Bytes{},
|
||||
}
|
||||
for j := 0; j < int(params.BeaconConfig().NumberOfColumns); j++ {
|
||||
cellProof := make([]byte, 48)
|
||||
blobAndCellProofs[i].KzgProofs = append(blobAndCellProofs[i].KzgProofs, cellProof)
|
||||
}
|
||||
}
|
||||
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": blobAndCellProofs,
|
||||
}
|
||||
|
||||
err := json.NewEncoder(w).Encode(respJSON)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
}
|
||||
|
||||
func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
|
||||
rpcClient, err := rpc.DialHTTP(url)
|
||||
require.NoError(t, err)
|
||||
@@ -2522,6 +2708,12 @@ func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Se
|
||||
return rpcClient, client
|
||||
}
|
||||
|
||||
func setupRpcClientV2(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
|
||||
rpcClient, client := setupRpcClient(t, url, client)
|
||||
client.capabilityCache = &capabilityCache{capabilities: map[string]interface{}{GetBlobsV2: nil}}
|
||||
return rpcClient, client
|
||||
}
|
||||
|
||||
func testNewBlobVerifier() verification.NewBlobVerifier {
|
||||
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
|
||||
return &verification.MockBlobVerifier{
|
||||
|
||||
@@ -38,6 +38,8 @@ type EngineClient struct {
|
||||
ErrGetPayload error
|
||||
BlobSidecars []blocks.VerifiedROBlob
|
||||
ErrorBlobSidecars error
|
||||
DataColumnSidecars []blocks.VerifiedRODataColumn
|
||||
ErrorDataColumnSidecars error
|
||||
}
|
||||
|
||||
// NewPayload --
|
||||
@@ -113,6 +115,10 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
|
||||
return e.BlobSidecars, e.ErrorBlobSidecars
|
||||
}
|
||||
|
||||
func (e *EngineClient) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error) {
|
||||
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
|
||||
}
|
||||
|
||||
// GetTerminalBlockHash --
|
||||
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error) {
|
||||
ttd := new(big.Int)
|
||||
|
||||
@@ -23,6 +23,7 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
@@ -86,44 +87,47 @@ type serviceFlagOpts struct {
|
||||
// full PoS node. It handles the lifecycle of the entire system and registers
|
||||
// services to a service registry.
|
||||
type BeaconNode struct {
|
||||
cliCtx *cli.Context
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
services *runtime.ServiceRegistry
|
||||
lock sync.RWMutex
|
||||
stop chan struct{} // Channel to wait for termination notifications.
|
||||
db db.Database
|
||||
slasherDB db.SlasherDatabase
|
||||
attestationCache *cache.AttestationCache
|
||||
attestationPool attestations.Pool
|
||||
exitPool voluntaryexits.PoolManager
|
||||
slashingsPool slashings.PoolManager
|
||||
syncCommitteePool synccommittee.Pool
|
||||
blsToExecPool blstoexec.PoolManager
|
||||
depositCache cache.DepositCache
|
||||
trackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
payloadIDCache *cache.PayloadIDCache
|
||||
stateFeed *event.Feed
|
||||
blockFeed *event.Feed
|
||||
opFeed *event.Feed
|
||||
stateGen *stategen.State
|
||||
collector *bcnodeCollector
|
||||
slasherBlockHeadersFeed *event.Feed
|
||||
slasherAttestationsFeed *event.Feed
|
||||
finalizedStateAtStartUp state.BeaconState
|
||||
serviceFlagOpts *serviceFlagOpts
|
||||
GenesisInitializer genesis.Initializer
|
||||
CheckpointInitializer checkpoint.Initializer
|
||||
forkChoicer forkchoice.ForkChoicer
|
||||
clockWaiter startup.ClockWaiter
|
||||
BackfillOpts []backfill.ServiceOption
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
BlobStorageOptions []filesystem.BlobStorageOption
|
||||
verifyInitWaiter *verification.InitializerWaiter
|
||||
syncChecker *initialsync.SyncChecker
|
||||
slasherEnabled bool
|
||||
lcStore *lightclient.Store
|
||||
cliCtx *cli.Context
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
services *runtime.ServiceRegistry
|
||||
lock sync.RWMutex
|
||||
stop chan struct{} // Channel to wait for termination notifications.
|
||||
db db.Database
|
||||
slasherDB db.SlasherDatabase
|
||||
attestationCache *cache.AttestationCache
|
||||
attestationPool attestations.Pool
|
||||
exitPool voluntaryexits.PoolManager
|
||||
slashingsPool slashings.PoolManager
|
||||
syncCommitteePool synccommittee.Pool
|
||||
blsToExecPool blstoexec.PoolManager
|
||||
depositCache cache.DepositCache
|
||||
trackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
payloadIDCache *cache.PayloadIDCache
|
||||
stateFeed *event.Feed
|
||||
blockFeed *event.Feed
|
||||
opFeed *event.Feed
|
||||
stateGen *stategen.State
|
||||
collector *bcnodeCollector
|
||||
slasherBlockHeadersFeed *event.Feed
|
||||
slasherAttestationsFeed *event.Feed
|
||||
finalizedStateAtStartUp state.BeaconState
|
||||
serviceFlagOpts *serviceFlagOpts
|
||||
GenesisInitializer genesis.Initializer
|
||||
CheckpointInitializer checkpoint.Initializer
|
||||
forkChoicer forkchoice.ForkChoicer
|
||||
clockWaiter startup.ClockWaiter
|
||||
BackfillOpts []backfill.ServiceOption
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
BlobStorageOptions []filesystem.BlobStorageOption
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
DataColumnStorageOptions []filesystem.DataColumnStorageOption
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
verifyInitWaiter *verification.InitializerWaiter
|
||||
syncChecker *initialsync.SyncChecker
|
||||
slasherEnabled bool
|
||||
lcStore *lightclient.Store
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -161,6 +165,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
serviceFlagOpts: &serviceFlagOpts{},
|
||||
initialSyncComplete: make(chan struct{}),
|
||||
syncChecker: &initialsync.SyncChecker{},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
slasherEnabled: cliCtx.Bool(flags.SlasherFlag.Name),
|
||||
lcStore: &lightclient.Store{},
|
||||
}
|
||||
@@ -190,6 +195,15 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
beacon.BlobStorage = blobs
|
||||
}
|
||||
|
||||
if beacon.DataColumnStorage == nil {
|
||||
dataColumnStorage, err := filesystem.NewDataColumnStorage(cliCtx.Context, beacon.DataColumnStorageOptions...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new data column storage")
|
||||
}
|
||||
|
||||
beacon.DataColumnStorage = dataColumnStorage
|
||||
}
|
||||
|
||||
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not start modules")
|
||||
@@ -280,7 +294,9 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
|
||||
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
|
||||
return nil, errors.Wrap(err, "could not start DB")
|
||||
}
|
||||
|
||||
beacon.BlobStorage.WarmCache()
|
||||
beacon.DataColumnStorage.WarmCache()
|
||||
|
||||
log.Debugln("Starting Slashing DB")
|
||||
if err := beacon.startSlasherDB(cliCtx); err != nil {
|
||||
@@ -491,6 +507,10 @@ func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath str
|
||||
return nil, errors.Wrap(err, "could not clear blob storage")
|
||||
}
|
||||
|
||||
if err := b.DataColumnStorage.Clear(); err != nil {
|
||||
return nil, errors.Wrap(err, "could not clear data column storage")
|
||||
}
|
||||
|
||||
d, err = kv.NewKVStore(b.ctx, dbPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create new database")
|
||||
@@ -697,6 +717,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
StateNotifier: b,
|
||||
DB: b.db,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
CustodyInfo: b.custodyInfo,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -775,9 +796,11 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithClockSynchronizer(gs),
|
||||
blockchain.WithSyncComplete(syncComplete),
|
||||
blockchain.WithBlobStorage(b.BlobStorage),
|
||||
blockchain.WithDataColumnStorage(b.DataColumnStorage),
|
||||
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
blockchain.WithPayloadIDCache(b.payloadIDCache),
|
||||
blockchain.WithSyncChecker(b.syncChecker),
|
||||
blockchain.WithCustodyInfo(b.custodyInfo),
|
||||
blockchain.WithSlasherEnabled(b.slasherEnabled),
|
||||
blockchain.WithLightClientStore(b.lcStore),
|
||||
)
|
||||
@@ -862,8 +885,11 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithInitialSyncComplete(initialSyncComplete),
|
||||
regularsync.WithStateNotifier(b),
|
||||
regularsync.WithBlobStorage(b.BlobStorage),
|
||||
regularsync.WithDataColumnStorage(b.DataColumnStorage),
|
||||
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
|
||||
regularsync.WithAvailableBlocker(bFillStore),
|
||||
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
regularsync.WithCustodyInfo(b.custodyInfo),
|
||||
regularsync.WithSlasherEnabled(b.slasherEnabled),
|
||||
regularsync.WithLightClientStore(b.lcStore),
|
||||
)
|
||||
@@ -889,6 +915,8 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
ClockWaiter: b.clockWaiter,
|
||||
InitialSyncComplete: complete,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
CustodyInfo: b.custodyInfo,
|
||||
}, opts...)
|
||||
return b.services.RegisterService(is)
|
||||
}
|
||||
@@ -983,6 +1011,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
BlobReceiver: chainService,
|
||||
DataColumnReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
@@ -1010,6 +1039,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
Router: router,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
TrackedValidatorsCache: b.trackedValidatorsCache,
|
||||
PayloadIDCache: b.payloadIDCache,
|
||||
LCStore: b.lcStore,
|
||||
@@ -1151,6 +1181,7 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
|
||||
|
||||
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
|
||||
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
|
||||
// TODO: Add backfill for data column storage
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error initializing backfill service")
|
||||
|
||||
@@ -54,7 +54,7 @@ func TestNodeClose_OK(t *testing.T) {
|
||||
cmd.ValidatorMonitorIndicesFlag.Value.SetInt(1)
|
||||
ctx, cancel := newCliContextWithCancel(&app, set)
|
||||
|
||||
node, err := New(ctx, cancel, WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
node, err := New(ctx, cancel, WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
|
||||
require.NoError(t, err)
|
||||
|
||||
node.Close()
|
||||
@@ -75,7 +75,7 @@ func TestNodeStart_Ok(t *testing.T) {
|
||||
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
|
||||
WithBuilderFlagOptions([]builder.Option{}),
|
||||
WithExecutionChainOptions([]execution.Option{}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
|
||||
require.NoError(t, err)
|
||||
node.services = &runtime.ServiceRegistry{}
|
||||
go func() {
|
||||
@@ -99,7 +99,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
|
||||
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
|
||||
WithBuilderFlagOptions([]builder.Option{}),
|
||||
WithExecutionChainOptions([]execution.Option{}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
|
||||
require.NoError(t, err)
|
||||
go func() {
|
||||
node.Start()
|
||||
@@ -130,7 +130,7 @@ func TestClearDB(t *testing.T) {
|
||||
context, cancel := newCliContextWithCancel(&app, set)
|
||||
options := []Option{
|
||||
WithExecutionChainOptions([]execution.Option{execution.WithHttpEndpoint(endpoint)}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
|
||||
}
|
||||
_, err = New(context, cancel, options...)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -50,3 +50,20 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
return nil
}
}

// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode.
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
return func(bn *BeaconNode) error {
bn.DataColumnStorage = bs
return nil
}
}

// WithDataColumnStorageOptions appends one or more filesystem.DataColumnStorageOption to the beacon node,
// to be used when initializing data column storage.
func WithDataColumnStorageOptions(opt ...filesystem.DataColumnStorageOption) Option {
return func(bn *BeaconNode) error {
bn.DataColumnStorageOptions = append(bn.DataColumnStorageOptions, opt...)
return nil
}
}
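The two new helpers follow the same functional-option pattern as the existing blob storage options; a minimal, self-contained sketch of that pattern (illustrative node/option names, not the actual BeaconNode wiring) is:

package main

import "fmt"

// option mutates a node under construction, mirroring the functional-option
// style of WithDataColumnStorage / WithDataColumnStorageOptions above.
// All names here are illustrative stand-ins, not the actual Prysm types.
type option func(*node) error

type node struct{ dataColumnDir string }

// withDataColumnDir is a hypothetical option that records a storage directory.
func withDataColumnDir(dir string) option {
	return func(n *node) error {
		n.dataColumnDir = dir
		return nil
	}
}

// newNode applies every option in order, the same way the beacon node applies
// its Option slice during construction.
func newNode(opts ...option) (*node, error) {
	n := &node{}
	for _, o := range opts {
		if err := o(n); err != nil {
			return nil, err
		}
	}
	return n, nil
}

func main() {
	n, _ := newNode(withDataColumnDir("/tmp/data-columns"))
	fmt.Println(n.dataColumnDir)
}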
@@ -71,7 +71,6 @@ go_library(
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
"//runtime:go_default_library",
|
||||
@@ -168,7 +167,6 @@ go_test(
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
@@ -279,14 +278,8 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
|
||||
return errors.New("attempted to broadcast nil light client optimistic update")
|
||||
}
|
||||
|
||||
forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not retrieve fork digest")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(forkDigest)); err != nil {
|
||||
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
|
||||
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
|
||||
err := errors.Wrap(err, "could not publish message")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -305,13 +298,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
|
||||
return errors.New("attempted to broadcast nil light client finality update")
|
||||
}
|
||||
|
||||
forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not retrieve fork digest")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
|
||||
if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast light client finality update")
|
||||
err := errors.Wrap(err, "could not publish message")
|
||||
|
||||
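Both light client broadcast paths above now derive the gossip topic infallibly from the update's slot, which is why the fork-digest error handling could be dropped. A rough stand-alone sketch of that topic construction (toy digest lookup and a hard-coded format string, both assumptions rather than the real params/p2p constants) is:

package main

import "fmt"

// forkDigest stands in for params.ForkDigest: a pure epoch -> digest lookup
// that cannot fail.
func forkDigest(epoch uint64) [4]byte {
	if epoch >= 100 { // hypothetical fork activation epoch
		return [4]byte{0x01, 0x02, 0x03, 0x04}
	}
	return [4]byte{0xaa, 0xbb, 0xcc, 0xdd}
}

func main() {
	const slotsPerEpoch = 32
	slot := uint64(3200) // slot of the update's attested header
	digest := forkDigest(slot / slotsPerEpoch)
	// The format string is only illustrative of the light client gossip topics used above.
	topic := fmt.Sprintf("/eth2/%x/light_client_finality_update", digest)
	fmt.Println(topic)
}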
@@ -16,12 +16,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
testpb "github.com/OffchainLabs/prysm/v6/proto/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
@@ -61,6 +62,7 @@ func TestService_Broadcast(t *testing.T) {
|
||||
topic := "/eth2/%x/testing"
|
||||
// Set a test gossip mapping for testpb.TestSimpleMessage.
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = topic
|
||||
p.clock = startup.NewClock(p.genesisTime, bytesutil.ToBytes32(p.genesisValidatorsRoot))
|
||||
digest, err := p.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = fmt.Sprintf(topic, digest)
|
||||
@@ -550,9 +552,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
|
||||
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
|
||||
require.NoError(t, err)
|
||||
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, digest)
|
||||
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
|
||||
|
||||
// External peer subscribes to the topic.
|
||||
topic += p.Encoding().ProtocolSuffix()
|
||||
@@ -617,9 +617,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
|
||||
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
|
||||
require.NoError(t, err)
|
||||
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, digest)
|
||||
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
|
||||
|
||||
// External peer subscribes to the topic.
|
||||
topic += p.Encoding().ProtocolSuffix()
|
||||
@@ -712,7 +710,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
|
||||
topic := fmt.Sprintf(topicFormat, digest, subnet)
|
||||
|
||||
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, util.DataColumnsParamsByRoot{{}: {{ColumnIndex: columnIndex}}})
|
||||
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
|
||||
sidecar := roSidecars[0].DataColumnSidecar
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
|
||||
@@ -2,6 +2,7 @@ package p2p
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"net"
|
||||
"sync"
|
||||
@@ -222,7 +223,6 @@ func (s *Service) RefreshPersistentSubnets() {
|
||||
// Get the sync subnet bitfield in our metadata.
|
||||
currentBitSInMetadata := s.Metadata().SyncnetsBitfield()
|
||||
|
||||
// Is our sync bitvector record up to date?
|
||||
isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)
|
||||
|
||||
// Compare current epoch with the Fulu fork epoch.
|
||||
@@ -504,7 +504,11 @@ func (s *Service) createLocalNode(
|
||||
localNode.SetFallbackIP(ipAddr)
|
||||
localNode.SetFallbackUDP(udpPort)
|
||||
|
||||
localNode, err = addForkEntry(localNode, s.genesisTime, s.genesisValidatorsRoot)
|
||||
clock, err := s.cfg.ClockWaiter.WaitForClock(context.Background())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "timeout waiting for startup clock")
|
||||
}
|
||||
localNode, err = addForkEntry(localNode, clock.CurrentEpoch())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
|
||||
}
|
||||
|
||||
@@ -3,13 +3,10 @@ package p2p
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v6/time"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/pkg/errors"
|
||||
@@ -22,10 +19,11 @@ var eth2ENRKey = params.BeaconNetworkConfig().ETH2Key
|
||||
// ForkDigest returns the current fork digest of
|
||||
// the node according to the local clock.
|
||||
func (s *Service) currentForkDigest() ([4]byte, error) {
|
||||
if !s.isInitialized() {
|
||||
if !s.isInitialized() || s.clock == nil {
|
||||
return [4]byte{}, errors.New("state is not initialized")
|
||||
}
|
||||
return forks.CreateForkDigest(s.genesisTime, s.genesisValidatorsRoot)
|
||||
|
||||
return params.ForkDigest(s.clock.CurrentEpoch()), nil
|
||||
}
|
||||
|
||||
// Compares fork ENRs between an incoming peer's record and our node's
|
||||
@@ -79,24 +77,9 @@ func (s *Service) compareForkENR(record *enr.Record) error {
|
||||
// which takes into account the current fork version from the current
|
||||
// epoch to create a fork digest, the next fork version,
|
||||
// and the next fork epoch.
|
||||
func addForkEntry(
|
||||
node *enode.LocalNode,
|
||||
genesisTime time.Time,
|
||||
genesisValidatorsRoot []byte,
|
||||
) (*enode.LocalNode, error) {
|
||||
digest, err := forks.CreateForkDigest(genesisTime, genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currentSlot := slots.Since(genesisTime)
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
if prysmTime.Now().Before(genesisTime) {
|
||||
currentEpoch = 0
|
||||
}
|
||||
nextForkVersion, nextForkEpoch, err := forks.NextForkData(currentEpoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func addForkEntry(node *enode.LocalNode, current primitives.Epoch) (*enode.LocalNode, error) {
|
||||
digest := params.ForkDigest(current)
|
||||
nextForkVersion, nextForkEpoch := params.NextForkData(current)
|
||||
enrForkID := &pb.ENRForkID{
|
||||
CurrentForkDigest: digest[:],
|
||||
NextForkVersion: nextForkVersion[:],
|
||||
|
||||
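The rewritten addForkEntry needs only the current epoch because the digest and next-fork data have become pure table lookups; a simplified stand-alone sketch of that lookup (toy schedule and types, not the real params package) is:

package main

import "fmt"

// fork is a toy schedule entry; the schedule stands in for the params.ForkDigest
// and params.NextForkData lookups that the new addForkEntry relies on.
type fork struct {
	activation uint64
	digest     [4]byte
	version    [4]byte
}

var schedule = []fork{
	{activation: 0, digest: [4]byte{0xaa}, version: [4]byte{0, 0, 0, 0}},
	{activation: 100, digest: [4]byte{0xbb}, version: [4]byte{0, 0, 0, 1}}, // hypothetical next fork
}

// enrForkData returns what the ENR eth2 entry needs: the active digest plus the
// next fork's version and epoch (or the current ones when no further fork is known).
func enrForkData(epoch uint64) (digest, nextVersion [4]byte, nextEpoch uint64) {
	i := 0
	for j, f := range schedule {
		if epoch >= f.activation {
			i = j
		}
	}
	digest = schedule[i].digest
	if i+1 < len(schedule) {
		return digest, schedule[i+1].version, schedule[i+1].activation
	}
	return digest, schedule[i].version, ^uint64(0)
}

func main() {
	d, v, e := enrForkData(42)
	fmt.Printf("digest=%x nextVersion=%x nextEpoch=%d\n", d, v, e)
}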
@@ -11,11 +11,11 @@ import (
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
@@ -231,10 +231,8 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
|
||||
nextForkVersion := []byte{0, 0, 0, 1}
|
||||
params.OverrideBeaconConfig(c)
|
||||
|
||||
genesisTime := time.Now()
|
||||
genesisValidatorsRoot := make([]byte, 32)
|
||||
digest, err := forks.CreateForkDigest(genesisTime, make([]byte, 32))
|
||||
require.NoError(t, err)
|
||||
clock := startup.NewClock(time.Now(), [32]byte{})
|
||||
digest := params.ForkDigest(clock.CurrentEpoch())
|
||||
enrForkID := &pb.ENRForkID{
|
||||
CurrentForkDigest: digest[:],
|
||||
NextForkVersion: nextForkVersion,
|
||||
@@ -256,7 +254,7 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
|
||||
localNode := enode.NewLocalNode(db, pkey)
|
||||
localNode.Set(entry)
|
||||
|
||||
want, err := signing.ComputeForkDigest([]byte{0, 0, 0, 0}, genesisValidatorsRoot)
|
||||
want, err := signing.ComputeForkDigest([]byte{0, 0, 0, 0}, clock.GenesisValidatorsRootSlice())
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := forkEntry(localNode.Node().Record())
|
||||
@@ -283,7 +281,8 @@ func TestAddForkEntry_Genesis(t *testing.T) {
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
|
||||
localNode := enode.NewLocalNode(db, pkey)
|
||||
localNode, err = addForkEntry(localNode, time.Now().Add(10*time.Second), bytesutil.PadTo([]byte{'A', 'B', 'C', 'D'}, 32))
|
||||
clock := startup.NewClock(time.Now(), bCfg.GenesisValidatorsRoot)
|
||||
localNode, err = addForkEntry(localNode, clock.CurrentEpoch())
|
||||
require.NoError(t, err)
|
||||
forkEntry, err := forkEntry(localNode.Node().Record())
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -25,7 +25,10 @@ func (s *Service) forkWatcher() {
|
||||
// this over the epoch, which might be slightly wasteful
|
||||
// but is fine nonetheless.
|
||||
if s.dv5Listener != nil { // make sure it's not a local network
|
||||
_, err := addForkEntry(s.dv5Listener.LocalNode(), s.genesisTime, s.genesisValidatorsRoot)
|
||||
if s.clock == nil {
|
||||
continue
|
||||
}
|
||||
_, err := addForkEntry(s.dv5Listener.LocalNode(), s.clock.CurrentEpoch())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not add fork entry")
|
||||
}
|
||||
|
||||
@@ -20,96 +20,106 @@ import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// P2P represents the full p2p interface composed of all of the sub-interfaces.
|
||||
type P2P interface {
|
||||
Broadcaster
|
||||
SetStreamHandler
|
||||
PubSubProvider
|
||||
PubSubTopicUser
|
||||
SenderEncoder
|
||||
PeerManager
|
||||
ConnectionHandler
|
||||
PeersProvider
|
||||
MetadataProvider
|
||||
}
|
||||
type (
|
||||
// P2P represents the full p2p interface composed of all of the sub-interfaces.
|
||||
P2P interface {
|
||||
Broadcaster
|
||||
SetStreamHandler
|
||||
PubSubProvider
|
||||
PubSubTopicUser
|
||||
SenderEncoder
|
||||
PeerManager
|
||||
ConnectionHandler
|
||||
PeersProvider
|
||||
MetadataProvider
|
||||
DataColumnsHandler
|
||||
}
|
||||
|
||||
// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
|
||||
type Broadcaster interface {
|
||||
Broadcast(context.Context, proto.Message) error
|
||||
BroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att) error
|
||||
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
|
||||
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
|
||||
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
|
||||
BroadcastDataColumn(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar, peersChecked ...chan<- bool) error
|
||||
}
|
||||
// Accessor provides access to the Broadcaster and PeerManager interfaces.
|
||||
Accessor interface {
|
||||
Broadcaster
|
||||
PeerManager
|
||||
}
|
||||
|
||||
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||
type SetStreamHandler interface {
|
||||
SetStreamHandler(topic string, handler network.StreamHandler)
|
||||
}
|
||||
// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
|
||||
Broadcaster interface {
|
||||
Broadcast(context.Context, proto.Message) error
|
||||
BroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att) error
|
||||
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
|
||||
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
|
||||
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
|
||||
BroadcastDataColumn(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar, peersChecked ...chan<- bool) error
|
||||
}
|
||||
|
||||
// PubSubTopicUser provides way to join, use and leave PubSub topics.
|
||||
type PubSubTopicUser interface {
|
||||
JoinTopic(topic string, opts ...pubsub.TopicOpt) (*pubsub.Topic, error)
|
||||
LeaveTopic(topic string) error
|
||||
PublishToTopic(ctx context.Context, topic string, data []byte, opts ...pubsub.PubOpt) error
|
||||
SubscribeToTopic(topic string, opts ...pubsub.SubOpt) (*pubsub.Subscription, error)
|
||||
}
|
||||
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||
SetStreamHandler interface {
|
||||
SetStreamHandler(topic string, handler network.StreamHandler)
|
||||
}
|
||||
|
||||
// ConnectionHandler configures p2p to handle connections with a peer.
|
||||
type ConnectionHandler interface {
|
||||
AddConnectionHandler(f func(ctx context.Context, id peer.ID) error,
|
||||
j func(ctx context.Context, id peer.ID) error)
|
||||
AddDisconnectionHandler(f func(ctx context.Context, id peer.ID) error)
|
||||
connmgr.ConnectionGater
|
||||
}
|
||||
// PubSubTopicUser provides a way to join, use, and leave PubSub topics.
|
||||
PubSubTopicUser interface {
|
||||
JoinTopic(topic string, opts ...pubsub.TopicOpt) (*pubsub.Topic, error)
|
||||
LeaveTopic(topic string) error
|
||||
PublishToTopic(ctx context.Context, topic string, data []byte, opts ...pubsub.PubOpt) error
|
||||
SubscribeToTopic(topic string, opts ...pubsub.SubOpt) (*pubsub.Subscription, error)
|
||||
}
|
||||
|
||||
// SenderEncoder allows sending functionality from libp2p as well as encoding for requests and responses.
|
||||
type SenderEncoder interface {
|
||||
EncodingProvider
|
||||
Sender
|
||||
}
|
||||
// ConnectionHandler configures p2p to handle connections with a peer.
|
||||
ConnectionHandler interface {
|
||||
AddConnectionHandler(f func(ctx context.Context, id peer.ID) error,
|
||||
j func(ctx context.Context, id peer.ID) error)
|
||||
AddDisconnectionHandler(f func(ctx context.Context, id peer.ID) error)
|
||||
connmgr.ConnectionGater
|
||||
}
|
||||
|
||||
// EncodingProvider provides p2p network encoding.
|
||||
type EncodingProvider interface {
|
||||
Encoding() encoder.NetworkEncoding
|
||||
}
|
||||
// SenderEncoder allows sending functionality from libp2p as well as encoding for requests and responses.
|
||||
SenderEncoder interface {
|
||||
EncodingProvider
|
||||
Sender
|
||||
}
|
||||
|
||||
// PubSubProvider provides the p2p pubsub protocol.
|
||||
type PubSubProvider interface {
|
||||
PubSub() *pubsub.PubSub
|
||||
}
|
||||
// EncodingProvider provides p2p network encoding.
|
||||
EncodingProvider interface {
|
||||
Encoding() encoder.NetworkEncoding
|
||||
}
|
||||
|
||||
// PeerManager abstracts some peer management methods from libp2p.
|
||||
type PeerManager interface {
|
||||
Disconnect(peer.ID) error
|
||||
PeerID() peer.ID
|
||||
Host() host.Host
|
||||
ENR() *enr.Record
|
||||
NodeID() enode.ID
|
||||
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
|
||||
RefreshPersistentSubnets()
|
||||
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
|
||||
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
|
||||
}
|
||||
// PubSubProvider provides the p2p pubsub protocol.
|
||||
PubSubProvider interface {
|
||||
PubSub() *pubsub.PubSub
|
||||
}
|
||||
|
||||
// Sender abstracts the sending functionality from libp2p.
|
||||
type Sender interface {
|
||||
Send(context.Context, interface{}, string, peer.ID) (network.Stream, error)
|
||||
}
|
||||
// PeerManager abstracts some peer management methods from libp2p.
|
||||
PeerManager interface {
|
||||
Disconnect(peer.ID) error
|
||||
PeerID() peer.ID
|
||||
Host() host.Host
|
||||
ENR() *enr.Record
|
||||
NodeID() enode.ID
|
||||
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
|
||||
RefreshPersistentSubnets()
|
||||
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
|
||||
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
|
||||
}
|
||||
|
||||
// PeersProvider abstracts obtaining our current list of known peers status.
|
||||
type PeersProvider interface {
|
||||
Peers() *peers.Status
|
||||
}
|
||||
// Sender abstracts the sending functionality from libp2p.
|
||||
Sender interface {
|
||||
Send(context.Context, interface{}, string, peer.ID) (network.Stream, error)
|
||||
}
|
||||
|
||||
// MetadataProvider returns the metadata related information for the local peer.
|
||||
type MetadataProvider interface {
|
||||
Metadata() metadata.Metadata
|
||||
MetadataSeq() uint64
|
||||
}
|
||||
// PeersProvider abstracts obtaining our current list of known peers status.
|
||||
PeersProvider interface {
|
||||
Peers() *peers.Status
|
||||
}
|
||||
|
||||
type DataColumnsHandler interface {
|
||||
CustodyGroupCountFromPeer(peer.ID) uint64
|
||||
}
|
||||
// MetadataProvider returns the metadata related information for the local peer.
|
||||
MetadataProvider interface {
|
||||
Metadata() metadata.Metadata
|
||||
MetadataSeq() uint64
|
||||
}
|
||||
|
||||
// DataColumnsHandler abstracts some data column related methods.
|
||||
DataColumnsHandler interface {
|
||||
CustodyGroupCountFromPeer(peer.ID) uint64
|
||||
}
|
||||
)
|
||||
|
||||
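The grouped type block above also introduces Accessor (Broadcaster plus PeerManager) so callers that need only those two capabilities can depend on one narrow interface; a toy consumer-side sketch (stand-in interfaces, not the real p2p types) is:

package main

import "fmt"

// Toy stand-ins for the Broadcaster and PeerManager sub-interfaces above.
type broadcaster interface{ Broadcast(msg string) error }
type peerManager interface{ PeerCount() int }

// accessor mirrors the embedding style of the new Accessor interface.
type accessor interface {
	broadcaster
	peerManager
}

type fakeP2P struct{ peers int }

func (f *fakeP2P) Broadcast(msg string) error { fmt.Println("broadcast:", msg); return nil }
func (f *fakeP2P) PeerCount() int             { return f.peers }

// announce needs only broadcast plus peer-count access, so it accepts the
// narrow composite interface rather than the full P2P service.
func announce(a accessor, msg string) error {
	if a.PeerCount() == 0 {
		return fmt.Errorf("no peers to broadcast to")
	}
	return a.Broadcast(msg)
}

func main() {
	_ = announce(&fakeP2P{peers: 3}, "hello")
}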
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/math"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
)
|
||||
|
||||
@@ -39,7 +38,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
|
||||
copy(msg, "invalid")
|
||||
return bytesutil.UnsafeCastToString(msg)
|
||||
}
|
||||
_, fEpoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot)
|
||||
_, fEpoch, err := params.ForkDataFromDigest(digest)
|
||||
if err != nil {
|
||||
// Impossible condition that should
|
||||
// never be hit.
|
||||
|
||||
@@ -7,10 +7,10 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/golang/snappy"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
@@ -18,22 +18,22 @@ import (
|
||||
|
||||
func TestMsgID_HashesCorrectly(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
|
||||
d, err := forks.CreateForkDigest(time.Now(), genesisValidatorsRoot)
|
||||
assert.NoError(t, err)
|
||||
clock := startup.NewClock(time.Now(), bytesutil.ToBytes32([]byte{'A'}))
|
||||
valRoot := clock.GenesisValidatorsRoot()
|
||||
d := params.ForkDigest(clock.CurrentEpoch())
|
||||
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
|
||||
invalidSnappy := [32]byte{'J', 'U', 'N', 'K'}
|
||||
pMsg := &pubsubpb.Message{Data: invalidSnappy[:], Topic: &tpc}
|
||||
hashedData := hash.Hash(append(params.BeaconConfig().MessageDomainInvalidSnappy[:], pMsg.Data...))
|
||||
msgID := string(hashedData[:20])
|
||||
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
|
||||
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], pMsg), "Got incorrect msg id")
|
||||
|
||||
validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
|
||||
enc := snappy.Encode(nil, validObj[:])
|
||||
nMsg := &pubsubpb.Message{Data: enc, Topic: &tpc}
|
||||
hashedData = hash.Hash(append(params.BeaconConfig().MessageDomainValidSnappy[:], validObj[:]...))
|
||||
msgID = string(hashedData[:20])
|
||||
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
|
||||
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], nMsg), "Got incorrect msg id")
|
||||
}
|
||||
|
||||
func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var _ Scorer = (*BadResponsesScorer)(nil)
|
||||
@@ -129,13 +128,14 @@ func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {
|
||||
|
||||
// isBadPeerNoLock is a lock-free version of IsBadPeer.
|
||||
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
|
||||
if peerData, ok := s.store.PeerData(pid); ok {
|
||||
if peerData.BadResponses >= s.config.Threshold {
|
||||
return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
|
||||
}
|
||||
// if peerData, ok := s.store.PeerData(pid); ok {
|
||||
// TODO: Remove this once out of devnet
|
||||
// if peerData.BadResponses >= s.config.Threshold {
|
||||
// return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
// return nil
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package scorers_test
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
@@ -13,39 +12,41 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
func TestScorers_BadResponses_Score(t *testing.T) {
|
||||
const pid = "peer1"
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_Score(t *testing.T) {
|
||||
// const pid = "peer1"
|
||||
|
||||
ctx := t.Context()
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 4,
|
||||
},
|
||||
},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 4,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
|
||||
assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
|
||||
// assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, -2.5, scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, -2.5, scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, float64(-5), scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, float64(-5), scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, float64(-7.5), scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, float64(-7.5), scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NotNil(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, -100.0, scorer.Score(pid))
|
||||
}
|
||||
// scorer.Increment(pid)
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, -100.0, scorer.Score(pid))
|
||||
// }
|
||||
|
||||
func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
@@ -137,56 +138,60 @@ func TestScorers_BadResponses_Decay(t *testing.T) {
|
||||
assert.Equal(t, 1, badResponses, "unexpected bad responses for pid3")
|
||||
}
|
||||
|
||||
func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
pid := peer.ID("peer1")
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// pid := peer.ID("peer1")
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
|
||||
peerStatuses.Add(nil, pid, nil, network.DirUnknown)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// peerStatuses.Add(nil, pid, nil, network.DirUnknown)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
|
||||
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
scorer.Increment(pid)
|
||||
if i == scorers.DefaultBadResponsesThreshold-1 {
|
||||
assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
} else {
|
||||
assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
}
|
||||
}
|
||||
}
|
||||
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
// scorer.Increment(pid)
|
||||
// if i == scorers.DefaultBadResponsesThreshold-1 {
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
// } else {
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestScorers_BadResponses_BadPeers(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_BadPeers(t *testing.T) {
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
|
||||
for i := 0; i < len(pids); i++ {
|
||||
peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
|
||||
}
|
||||
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
scorer.Increment(pids[1])
|
||||
scorer.Increment(pids[2])
|
||||
scorer.Increment(pids[4])
|
||||
}
|
||||
assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
|
||||
assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
|
||||
want := []peer.ID{pids[1], pids[2], pids[4]}
|
||||
badPeers := scorer.BadPeers()
|
||||
sort.Slice(badPeers, func(i, j int) bool {
|
||||
return badPeers[i] < badPeers[j]
|
||||
})
|
||||
assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
|
||||
}
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
|
||||
// for i := 0; i < len(pids); i++ {
|
||||
// peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
|
||||
// }
|
||||
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
// scorer.Increment(pids[1])
|
||||
// scorer.Increment(pids[2])
|
||||
// scorer.Increment(pids[4])
|
||||
// }
|
||||
// assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
|
||||
// assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
|
||||
// want := []peer.ID{pids[1], pids[2], pids[4]}
|
||||
// badPeers := scorer.BadPeers()
|
||||
// sort.Slice(badPeers, func(i, j int) bool {
|
||||
// return badPeers[i] < badPeers[j]
|
||||
// })
|
||||
// assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
|
||||
// }
|
||||
|
||||
@@ -42,7 +42,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
|
||||
},
|
||||
check: func(scorer *scorers.GossipScorer) {
|
||||
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
|
||||
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
_, _, topicMap, err := scorer.GossipData("peer1")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
|
||||
|
||||
@@ -211,99 +211,102 @@ func TestScorers_Service_Score(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestScorers_Service_loop(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_loop(t *testing.T) {
|
||||
// ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 5,
|
||||
DecayInterval: 50 * time.Millisecond,
|
||||
},
|
||||
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
DecayInterval: 25 * time.Millisecond,
|
||||
Decay: 64,
|
||||
},
|
||||
},
|
||||
})
|
||||
s1 := peerStatuses.Scorers().BadResponsesScorer()
|
||||
s2 := peerStatuses.Scorers().BlockProviderScorer()
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 5,
|
||||
// DecayInterval: 50 * time.Millisecond,
|
||||
// },
|
||||
// BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
// DecayInterval: 25 * time.Millisecond,
|
||||
// Decay: 64,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
// s1 := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// s2 := peerStatuses.Scorers().BlockProviderScorer()
|
||||
|
||||
pid1 := peer.ID("peer1")
|
||||
peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
|
||||
for i := 0; i < s1.Params().Threshold+5; i++ {
|
||||
s1.Increment(pid1)
|
||||
}
|
||||
assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
|
||||
// pid1 := peer.ID("peer1")
|
||||
// peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
|
||||
// for i := 0; i < s1.Params().Threshold+5; i++ {
|
||||
// s1.Increment(pid1)
|
||||
// }
|
||||
// assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
|
||||
|
||||
s2.IncrementProcessedBlocks("peer1", 221)
|
||||
assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
|
||||
// s2.IncrementProcessedBlocks("peer1", 221)
|
||||
// assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
|
||||
|
||||
done := make(chan struct{}, 1)
|
||||
go func() {
|
||||
defer func() {
|
||||
done <- struct{}{}
|
||||
}()
|
||||
ticker := time.NewTicker(50 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Error("Timed out")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
// done := make(chan struct{}, 1)
|
||||
// go func() {
|
||||
// defer func() {
|
||||
// done <- struct{}{}
|
||||
// }()
|
||||
// ticker := time.NewTicker(50 * time.Millisecond)
|
||||
// defer ticker.Stop()
|
||||
// for {
|
||||
// select {
|
||||
// case <-ticker.C:
|
||||
// if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
|
||||
// return
|
||||
// }
|
||||
// case <-ctx.Done():
|
||||
// t.Error("Timed out")
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
// }()
|
||||
|
||||
<-done
|
||||
assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
|
||||
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
|
||||
}
|
||||
// <-done
|
||||
// assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
|
||||
// assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
|
||||
// }
|
||||
|
||||
func TestScorers_Service_IsBadPeer(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_IsBadPeer(t *testing.T) {
|
||||
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 2,
|
||||
// DecayInterval: 50 * time.Second,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
}
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// }
|
||||
|
||||
func TestScorers_Service_BadPeers(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_BadPeers(t *testing.T) {
|
||||
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 2,
|
||||
// DecayInterval: 50 * time.Second,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
|
||||
for _, pid := range []peer.ID{"peer1", "peer3"} {
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
}
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
// assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
|
||||
// for _, pid := range []peer.ID{"peer1", "peer3"} {
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// }
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
// assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
// }
|
||||
|
||||
@@ -705,31 +705,47 @@ func (p *Status) deprecatedPrune() {
|
||||
p.tallyIPTracker()
|
||||
}
|
||||
|
||||
// BestFinalized returns the highest finalized epoch equal to or higher than ours that is agreed
|
||||
// upon by the majority of peers. This method may not return the absolute highest finalized, but
|
||||
// the finalized epoch in which most peers can serve blocks (plurality voting).
|
||||
// Ideally, all peers would be reporting the same finalized epoch but some may be behind due to their
|
||||
// own latency, or because of their finalized epoch at the time we queried them.
|
||||
// Returns epoch number and list of peers that are at or beyond that epoch.
|
||||
// BestFinalized returns the highest finalized epoch equal to or higher than `ourFinalizedEpoch`
|
||||
// that is agreed upon by the majority of peers, and the peers agreeing on this finalized epoch.
|
||||
// This method may not return the absolute highest finalized epoch, but the finalized epoch in which
|
||||
// most peers can serve blocks (plurality voting). Ideally, all peers would be reporting the same
|
||||
// finalized epoch but some may be behind due to their own latency, or because of their finalized
|
||||
// epoch at the time we queried them. Returns epoch number and list of peers that are at or beyond
|
||||
// that epoch.
|
||||
func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
// Retrieve all connected peers.
|
||||
connected := p.Connected()
|
||||
|
||||
// key: finalized epoch, value: number of peers that support this finalized epoch.
|
||||
finalizedEpochVotes := make(map[primitives.Epoch]uint64)
|
||||
|
||||
// key: peer ID, value: finalized epoch of the peer.
|
||||
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))
|
||||
|
||||
// key: peer ID, value: head slot of the peer.
|
||||
pidHead := make(map[peer.ID]primitives.Slot, len(connected))
|
||||
|
||||
potentialPIDs := make([]peer.ID, 0, len(connected))
|
||||
for _, pid := range connected {
|
||||
peerChainState, err := p.ChainState(pid)
|
||||
if err == nil && peerChainState != nil && peerChainState.FinalizedEpoch >= ourFinalizedEpoch {
|
||||
finalizedEpochVotes[peerChainState.FinalizedEpoch]++
|
||||
pidEpoch[pid] = peerChainState.FinalizedEpoch
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
|
||||
// Skip if the peer's finalized epoch is not defined, or if the peer's finalized epoch is
|
||||
// lower than ours.
|
||||
if err != nil || peerChainState == nil || peerChainState.FinalizedEpoch < ourFinalizedEpoch {
|
||||
continue
|
||||
}
|
||||
|
||||
finalizedEpochVotes[peerChainState.FinalizedEpoch]++
|
||||
|
||||
pidEpoch[pid] = peerChainState.FinalizedEpoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
}
|
||||
|
||||
// Select the target epoch, which is the epoch most peers agree upon.
|
||||
var targetEpoch primitives.Epoch
|
||||
var mostVotes uint64
|
||||
// If there is a tie, select the highest epoch.
|
||||
targetEpoch, mostVotes := primitives.Epoch(0), uint64(0)
|
||||
for epoch, count := range finalizedEpochVotes {
|
||||
if count > mostVotes || (count == mostVotes && epoch > targetEpoch) {
|
||||
mostVotes = count
|
||||
@@ -737,11 +753,12 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort PIDs by finalized epoch, in decreasing order.
|
||||
// Sort PIDs by finalized (epoch, head), in decreasing order.
|
||||
sort.Slice(potentialPIDs, func(i, j int) bool {
|
||||
if pidEpoch[potentialPIDs[i]] == pidEpoch[potentialPIDs[j]] {
|
||||
return pidHead[potentialPIDs[i]] > pidHead[potentialPIDs[j]]
|
||||
}
|
||||
|
||||
return pidEpoch[potentialPIDs[i]] > pidEpoch[potentialPIDs[j]]
|
||||
})
|
||||
|
||||
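The selection above is a plurality vote over peers' finalized epochs with ties broken toward the higher epoch, followed by the (epoch, head slot) sort; a compact sketch of just the vote/tie-break step, using plain uint64 in place of primitives.Epoch, is:

package main

import "fmt"

// bestEpoch picks the epoch with the most votes; on a tie it prefers the higher
// epoch, matching the tie-break added to BestFinalized above.
func bestEpoch(votes map[uint64]uint64) uint64 {
	var target, most uint64
	for epoch, count := range votes {
		if count > most || (count == most && epoch > target) {
			most = count
			target = epoch
		}
	}
	return target
}

func main() {
	// Three peers at epoch 10 and three at epoch 11: the tie resolves to 11.
	fmt.Println(bestEpoch(map[uint64]uint64{10: 3, 11: 3, 9: 1}))
}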
@@ -764,26 +781,42 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
|
||||
// BestNonFinalized returns the highest known epoch, higher than ours,
|
||||
// and is shared by at least minPeers.
|
||||
func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
// Retrieve all connected peers.
|
||||
connected := p.Connected()
|
||||
|
||||
// Calculate our head slot.
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
ourHeadSlot := slotsPerEpoch.Mul(uint64(ourHeadEpoch))
|
||||
|
||||
// key: head epoch, value: number of peers that support this epoch.
|
||||
epochVotes := make(map[primitives.Epoch]uint64)
|
||||
|
||||
// key: peer ID, value: head epoch of the peer.
|
||||
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))
|
||||
|
||||
// key: peer ID, value: head slot of the peer.
|
||||
pidHead := make(map[peer.ID]primitives.Slot, len(connected))
|
||||
|
||||
potentialPIDs := make([]peer.ID, 0, len(connected))
|
||||
|
||||
ourHeadSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(ourHeadEpoch))
|
||||
for _, pid := range connected {
|
||||
peerChainState, err := p.ChainState(pid)
|
||||
if err == nil && peerChainState != nil && peerChainState.HeadSlot > ourHeadSlot {
|
||||
epoch := slots.ToEpoch(peerChainState.HeadSlot)
|
||||
epochVotes[epoch]++
|
||||
pidEpoch[pid] = epoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
// Skip if the peer's head epoch is not defined, or if the peer's head slot is
|
||||
// lower than or equal to ours.
|
||||
if err != nil || peerChainState == nil || peerChainState.HeadSlot <= ourHeadSlot {
|
||||
continue
|
||||
}
|
||||
|
||||
epoch := slots.ToEpoch(peerChainState.HeadSlot)
|
||||
|
||||
epochVotes[epoch]++
|
||||
pidEpoch[pid] = epoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
}
|
||||
|
||||
// Select the target epoch, which has enough peers' votes (>= minPeers).
|
||||
var targetEpoch primitives.Epoch
|
||||
targetEpoch := primitives.Epoch(0)
|
||||
for epoch, votes := range epochVotes {
|
||||
if votes >= uint64(minPeers) && targetEpoch < epoch {
|
||||
targetEpoch = epoch
|
||||
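By contrast with BestFinalized, the loop above simply takes the highest epoch that clears the minPeers threshold; a one-function sketch of that rule (plain uint64 instead of primitives.Epoch) is:

package main

import "fmt"

// highestSupported returns the highest epoch backed by at least minPeers votes,
// mirroring the selection loop in BestNonFinalized above; zero means that no
// epoch qualified.
func highestSupported(votes map[uint64]uint64, minPeers uint64) uint64 {
	var target uint64
	for epoch, count := range votes {
		if count >= minPeers && epoch > target {
			target = epoch
		}
	}
	return target
}

func main() {
	fmt.Println(highestSupported(map[uint64]uint64{12: 1, 11: 4, 10: 6}, 3)) // prints 11
}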
@@ -1012,16 +1045,23 @@ func (p *Status) isfromBadIP(pid peer.ID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
ip, err := manet.ToIP(peerData.Address)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "to ip")
|
||||
}
|
||||
// ip, err := manet.ToIP(peerData.Address)
|
||||
// if err != nil {
|
||||
// return errors.Wrap(err, "to ip")
|
||||
// }
|
||||
|
||||
if val, ok := p.ipTracker[ip.String()]; ok {
|
||||
if val > CollocationLimit {
|
||||
return errors.Errorf("collocation limit exceeded: got %d - limit %d", val, CollocationLimit)
|
||||
}
|
||||
}
|
||||
// if val, ok := p.ipTracker[ip.String()]; ok {
|
||||
// if val > CollocationLimit {
|
||||
// TODO: Remove this once out of devnet.
|
||||
// return errors.Errorf("colocation limit exceeded: got %d - limit %d", val, CollocationLimit)
|
||||
// log.WithFields(logrus.Fields{
|
||||
// "pid": pid,
|
||||
// "ip": ip.String(),
|
||||
// "colocationCount": val,
|
||||
// "colocationLimit": CollocationLimit,
|
||||
// }).Debug("Colocation limit exceeded. Peer should be banned.")
|
||||
// }
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package peers_test
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -329,55 +328,56 @@ func TestPeerWithNilChainState(t *testing.T) {
|
||||
require.Equal(t, resChainState, nothing)
|
||||
}
|
||||
|
||||
func TestPeerBadResponses(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestPeerBadResponses(t *testing.T) {
|
||||
// maxBadResponses := 2
|
||||
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: maxBadResponses,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
|
||||
require.NoError(t, err)
|
||||
{
|
||||
_, err := id.MarshalBinary()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
|
||||
// require.NoError(t, err)
|
||||
// {
|
||||
// _, err := id.MarshalBinary()
|
||||
// require.NoError(t, err)
|
||||
// }
|
||||
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
|
||||
require.NoError(t, err, "Failed to create address")
|
||||
direction := network.DirInbound
|
||||
p.Add(new(enr.Record), id, address, direction)
|
||||
// address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
|
||||
// require.NoError(t, err, "Failed to create address")
|
||||
// direction := network.DirInbound
|
||||
// p.Add(new(enr.Record), id, address, direction)
|
||||
|
||||
scorer := p.Scorers().BadResponsesScorer()
|
||||
resBadResponses, err := scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// scorer := p.Scorers().BadResponsesScorer()
|
||||
// resBadResponses, err := scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
|
||||
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
|
||||
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
}
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
// }
|
||||
|
||||
func TestAddMetaData(t *testing.T) {
maxBadResponses := 2
@@ -496,100 +496,102 @@ func TestPeerValidTime(t *testing.T) {
assert.Equal(t, numPeersConnected, len(p.Connected()), "Unexpected number of connected peers")
}

func TestPrune(t *testing.T) {
maxBadResponses := 2
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})
// TODO: Uncomment when out of devnet
// func TestPrune(t *testing.T) {
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })

for i := 0; i < p.MaxPeerLimit()+100; i++ {
if i%7 == 0 {
// Peer added as disconnected.
_ = addPeer(t, p, peers.Disconnected)
}
// Peer added to peer handler.
_ = addPeer(t, p, peers.Connected)
}
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
// if i%7 == 0 {
// // Peer added as disconnected.
// _ = addPeer(t, p, peers.PeerDisconnected)
// }
// // Peer added to peer handler.
// _ = addPeer(t, p, peers.PeerConnected)
// }

disPeers := p.Disconnected()
firstPID := disPeers[0]
secondPID := disPeers[1]
thirdPID := disPeers[2]
// disPeers := p.Disconnected()
// firstPID := disPeers[0]
// secondPID := disPeers[1]
// thirdPID := disPeers[2]

scorer := p.Scorers().BadResponsesScorer()
// scorer := p.Scorers().BadResponsesScorer()

// Make first peer a bad peer
scorer.Increment(firstPID)
scorer.Increment(firstPID)
// // Make first peer a bad peer
// scorer.Increment(firstPID)
// scorer.Increment(firstPID)

// Add bad response for p2.
scorer.Increment(secondPID)
// // Add bad response for p2.
// scorer.Increment(secondPID)

// Prune peers
p.Prune()
// // Prune peers
// p.Prune()

// Bad peer is expected to still be kept in handler.
badRes, err := scorer.Count(firstPID)
assert.NoError(t, err, "error is supposed to be nil")
assert.Equal(t, 2, badRes, "Did not get expected amount")
// // Bad peer is expected to still be kept in handler.
// badRes, err := scorer.Count(firstPID)
// assert.NoError(t, err, "error is supposed to be nil")
// assert.Equal(t, 2, badRes, "Did not get expected amount")

// Not so good peer is pruned away so that we can reduce the
// total size of the handler.
_, err = scorer.Count(secondPID)
assert.ErrorContains(t, "peer unknown", err)
// // Not so good peer is pruned away so that we can reduce the
// // total size of the handler.
// _, err = scorer.Count(secondPID)
// assert.ErrorContains(t, "peer unknown", err)

// Last peer has been removed.
_, err = scorer.Count(thirdPID)
assert.ErrorContains(t, "peer unknown", err)
}
// // Last peer has been removed.
// _, err = scorer.Count(thirdPID)
// assert.ErrorContains(t, "peer unknown", err)
// }

func TestPeerIPTracker(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: false,
})
defer resetCfg()
maxBadResponses := 2
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})
// TODO: Uncomment when out of devnet
// func TestPeerIPTracker(t *testing.T) {
// resetCfg := features.InitWithReset(&features.Flags{
// EnablePeerScorer: false,
// })
// defer resetCfg()
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })

badIP := "211.227.218.116"
var badPeers []peer.ID
for i := 0; i < peers.CollocationLimit+10; i++ {
port := strconv.Itoa(3000 + i)
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
if err != nil {
t.Fatal(err)
}
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
}
for _, pr := range badPeers {
assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
}
// badIP := "211.227.218.116"
// var badPeers []peer.ID
// for i := 0; i < peers.CollocationLimit+10; i++ {
// port := strconv.Itoa(3000 + i)
// addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
// if err != nil {
// t.Fatal(err)
// }
// badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
// }
// for _, pr := range badPeers {
// assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
// }

// Add in bad peers, so that our records are trimmed out
// from the peer store.
for i := 0; i < p.MaxPeerLimit()+100; i++ {
// Peer added to peer handler.
pid := addPeer(t, p, peers.Disconnected)
p.Scorers().BadResponsesScorer().Increment(pid)
}
p.Prune()
// // Add in bad peers, so that our records are trimmed out
// // from the peer store.
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
// // Peer added to peer handler.
// pid := addPeer(t, p, peers.PeerDisconnected)
// p.Scorers().BadResponsesScorer().Increment(pid)
// }
// p.Prune()

for _, pr := range badPeers {
assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
}
}
// for _, pr := range badPeers {
// assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
// }
// }

func TestTrimmedOrderedPeers(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{

@@ -6,7 +6,6 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/network/forks"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
@@ -53,36 +52,12 @@ func (s *Service) CanSubscribe(topic string) bool {
log.WithError(err).Error("Could not determine fork digest")
return false
}
altairForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine altair fork digest")
return false
}
bellatrixForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Bellatrix fork digest")
return false
}
capellaForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Capella fork digest")
return false
}
denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Deneb fork digest")
return false
}
electraForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Electra fork digest")
return false
}
fuluForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().FuluForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Fulu fork digest")
return false
}
altairForkDigest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
bellatrixForkDigest := params.ForkDigest(params.BeaconConfig().BellatrixForkEpoch)
capellaForkDigest := params.ForkDigest(params.BeaconConfig().CapellaForkEpoch)
denebForkDigest := params.ForkDigest(params.BeaconConfig().DenebForkEpoch)
electraForkDigest := params.ForkDigest(params.BeaconConfig().ElectraForkEpoch)
fuluForkDigest := params.ForkDigest(params.BeaconConfig().FuluForkEpoch)
switch parts[2] {
case fmt.Sprintf("%x", phase0ForkDigest):
case fmt.Sprintf("%x", altairForkDigest):

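The hunk above replaces the per-fork calls to `forks.ForkDigestFromEpoch` (each of which could fail) with the `params.ForkDigest` lookup. A minimal sketch of how the new helper produces the digest strings the switch statement compares against; the calls themselves appear in the diff, while the standalone program wiring (and the assumption that the default beacon config is loaded) is illustrative:

```go
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/config/params"
)

func main() {
	cfg := params.BeaconConfig() // assumes the default (mainnet) config is in use

	// One digest per fork epoch; no error handling is needed with the new helper.
	digests := [][4]byte{
		params.ForkDigest(cfg.AltairForkEpoch),
		params.ForkDigest(cfg.BellatrixForkEpoch),
		params.ForkDigest(cfg.CapellaForkEpoch),
		params.ForkDigest(cfg.DenebForkEpoch),
		params.ForkDigest(cfg.ElectraForkEpoch),
		params.ForkDigest(cfg.FuluForkEpoch),
	}

	// Gossip topics embed the digest as lowercase hex, which is how
	// CanSubscribe matches parts[2] in the switch above.
	for _, d := range digests {
		fmt.Printf("%x\n", d)
	}
}
```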
@@ -11,8 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -23,10 +21,8 @@ func TestService_CanSubscribe(t *testing.T) {
params.SetupTestConfigCleanup(t)
currentFork := [4]byte{0x01, 0x02, 0x03, 0x04}
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
clock := startup.NewClock(time.Now(), [32]byte{})
digest := params.ForkDigest(clock.CurrentEpoch())
type test struct {
name string
topic string
@@ -108,11 +104,12 @@ func TestService_CanSubscribe(t *testing.T) {
}
tests = append(tests, tt)
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: genesisTime,
genesisTime: clock.GenesisTime(),
}
if got := s.CanSubscribe(tt.topic); got != tt.want {
t.Errorf("CanSubscribe(%s) = %v, want %v", tt.topic, got, tt.want)
@@ -220,10 +217,8 @@ func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testi
func TestService_FilterIncomingSubscriptions(t *testing.T) {
params.SetupTestConfigCleanup(t)
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
clock := startup.NewClock(time.Now(), [32]byte{})
digest := params.ForkDigest(clock.CurrentEpoch())
type args struct {
id peer.ID
subs []*pubsubpb.RPC_SubOpts
@@ -320,11 +315,12 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
},
},
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: genesisTime,
genesisTime: clock.GenesisTime(),
}
got, err := s.FilterIncomingSubscriptions(tt.args.id, tt.args.subs)
if (err != nil) != tt.wantErr {

@@ -61,6 +61,12 @@ const LightClientFinalityUpdateName = "/light_client_finality_update"
// LightClientOptimisticUpdateName is the name for the LightClientOptimisticUpdate topic.
const LightClientOptimisticUpdateName = "/light_client_optimistic_update"

// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
const DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"

// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
const DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"

const (
// V1 RPC Topics
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
@@ -92,6 +98,12 @@ const (
RPCLightClientFinalityUpdateTopicV1 = protocolPrefix + LightClientFinalityUpdateName + SchemaVersionV1
// RPCLightClientOptimisticUpdateTopicV1 is a topic for requesting a light client Optimistic update.
RPCLightClientOptimisticUpdateTopicV1 = protocolPrefix + LightClientOptimisticUpdateName + SchemaVersionV1
// RPCDataColumnSidecarsByRootTopicV1 is a topic for requesting data column sidecars by their block root.
// /eth2/beacon_chain/req/data_column_sidecars_by_root/1 - New in Fulu.
RPCDataColumnSidecarsByRootTopicV1 = protocolPrefix + DataColumnSidecarsByRootName + SchemaVersionV1
// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1

// V2 RPC Topics
// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.
@@ -139,6 +151,11 @@ var RPCTopicMappings = map[string]interface{}{
RPCLightClientUpdatesByRangeTopicV1: new(pb.LightClientUpdatesByRangeRequest),
RPCLightClientFinalityUpdateTopicV1: new(interface{}),
RPCLightClientOptimisticUpdateTopicV1: new(interface{}),

// DataColumnSidecarsByRange v1 Message
RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
// DataColumnSidecarsByRoot v1 Message
RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
}

// Maps all registered protocol prefixes.
@@ -161,6 +178,8 @@ var messageMapping = map[string]bool{
LightClientUpdatesByRangeName: true,
LightClientFinalityUpdateName: true,
LightClientOptimisticUpdateName: true,
DataColumnSidecarsByRootName: true,
DataColumnSidecarsByRangeName: true,
}

// Maps all the RPC messages which are to updated in altair.

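For readers tracing how the new constants expand: an RPC topic is the protocol prefix, the message name, and the schema version concatenated, which is what the `/eth2/beacon_chain/req/data_column_sidecars_by_root/1` comment above spells out. A small illustrative sketch; the string literals mirror the comments in the diff, and the prefix value is an assumption based on those comments (its real definition lives elsewhere in the same file):

```go
package main

import "fmt"

func main() {
	// These literals mirror the constants added in the diff; in Prysm they are composed as
	// protocolPrefix + DataColumnSidecarsByRootName + SchemaVersionV1, and so on.
	const (
		protocolPrefix                = "/eth2/beacon_chain/req" // assumed from the topic comment above
		dataColumnSidecarsByRootName  = "/data_column_sidecars_by_root"
		dataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
		schemaVersionV1               = "/1"
	)

	fmt.Println(protocolPrefix + dataColumnSidecarsByRootName + schemaVersionV1)  // /eth2/beacon_chain/req/data_column_sidecars_by_root/1
	fmt.Println(protocolPrefix + dataColumnSidecarsByRangeName + schemaVersionV1) // /eth2/beacon_chain/req/data_column_sidecars_by_range/1
}
```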
@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
@@ -82,6 +83,7 @@ type Service struct {
genesisTime time.Time
genesisValidatorsRoot []byte
activeValidatorCount uint64
clock *startup.Clock
}

// NewService initializes a new p2p service compatible with shared.Service interface. No

@@ -10,12 +10,9 @@ import (

mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
@@ -348,58 +345,57 @@ func TestService_JoinLeaveTopic(t *testing.T) {
func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.ClockSetter) [4]byte {
gt := prysmTime.Now()
gvr := bytesutil.ToBytes32(bytesutil.PadTo([]byte("genesis validators root"), 32))
require.NoError(t, gs.SetClock(startup.NewClock(gt, gvr)))

fd, err := forks.CreateForkDigest(gt, gvr[:])
require.NoError(t, err)
clock := startup.NewClock(gt, gvr)
require.NoError(t, gs.SetClock(clock))

time.Sleep(50 * time.Millisecond) // wait for pubsub filter to initialize.

return fd
return params.ForkDigest(clock.CurrentEpoch())
}

func TestService_connectWithPeer(t *testing.T) {
params.SetupTestConfigCleanup(t)
tests := []struct {
name string
peers *peers.Status
info peer.AddrInfo
wantErr string
}{
{
name: "bad peer",
peers: func() *peers.Status {
ps := peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
for i := 0; i < 10; i++ {
ps.Scorers().BadResponsesScorer().Increment("bad")
}
return ps
}(),
info: peer.AddrInfo{ID: "bad"},
wantErr: "refused to connect to bad peer",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h, _, _ := createHost(t, 34567)
defer func() {
if err := h.Close(); err != nil {
t.Fatal(err)
}
}()
ctx := context.Background()
s := &Service{
host: h,
peers: tt.peers,
}
err := s.connectWithPeer(ctx, tt.info)
if len(tt.wantErr) > 0 {
require.ErrorContains(t, tt.wantErr, err)
} else {
require.NoError(t, err)
}
})
}
}
// TODO: Uncomment when out of devnet
// func TestService_connectWithPeer(t *testing.T) {
// params.SetupTestConfigCleanup(t)
// tests := []struct {
// name string
// peers *peers.Status
// info peer.AddrInfo
// wantErr string
// }{
// {
// name: "bad peer",
// peers: func() *peers.Status {
// ps := peers.NewStatus(context.Background(), &peers.StatusConfig{
// ScorerParams: &scorers.Config{},
// })
// for i := 0; i < 10; i++ {
// ps.Scorers().BadResponsesScorer().Increment("bad")
// }
// return ps
// }(),
// info: peer.AddrInfo{ID: "bad"},
// wantErr: "refused to connect to bad peer",
// },
// }
// for _, tt := range tests {
// t.Run(tt.name, func(t *testing.T) {
// h, _, _ := createHost(t, 34567)
// defer func() {
// if err := h.Close(); err != nil {
// t.Fatal(err)
// }
// }()
// ctx := context.Background()
// s := &Service{
// host: h,
// peers: tt.peers,
// }
// err := s.connectWithPeer(ctx, tt.info)
// if len(tt.wantErr) > 0 {
// require.ErrorContains(t, tt.wantErr, err)
// } else {
// require.NoError(t, err)
// }
// })
// }
// }

@@ -23,7 +23,6 @@ import (
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/sirupsen/logrus"
)

var (
@@ -74,8 +73,8 @@ func (s *Service) nodeFilter(topic string, index uint64) (func(node *enode.Node)

// searchForPeers performs a network search for peers subscribed to a particular subnet.
// It exits as soon as one of these conditions is met:
// - It looped through `batchSize` nodes.
// - It found `peersToFindCount` peers corresponding to the `filter` criteria.
// - It looped during `batchPeriod` duration, or
// - It found `peersToFindCount` peers corresponding to the `filter` criteria, or
// - Iterator is exhausted.
func searchForPeers(
iterator enode.Iterator,
@@ -147,8 +146,6 @@ func (s *Service) FindPeersWithSubnet(
index uint64,
threshold int,
) (bool, error) {
const minLogInterval = 1 * time.Minute

ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
defer span.End()

@@ -168,41 +165,29 @@ func (s *Service) FindPeersWithSubnet(
return false, errors.Wrap(err, "node filter")
}

peersSummary := func(topic string, threshold int) (int, int) {
peersSummary := func(topic string, threshold int) int {
// Retrieve how many peers we have for this topic.
peerCountForTopic := len(s.pubsub.ListPeers(topic))

// Compute how many peers we are missing to reach the threshold.
missingPeerCountForTopic := max(0, threshold-peerCountForTopic)

return peerCountForTopic, missingPeerCountForTopic
return missingPeerCountForTopic
}

// Compute how many peers we are missing to reach the threshold.
peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)
missingPeerCountForTopic := peersSummary(topic, threshold)

// Exit early if we have enough peers.
if missingPeerCountForTopic == 0 {
return true, nil
}

log := log.WithFields(logrus.Fields{
"topic": topic,
"targetPeerCount": threshold,
})

log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - start")

lastLogTime := time.Now()

wg := new(sync.WaitGroup)
for {
// If the context is done, we can exit the loop. This is the unhappy path.
if err := ctx.Err(); err != nil {
return false, errors.Errorf(
"unable to find requisite number of peers for topic %s - only %d out of %d peers available after searching",
topic, peerCountForTopic, threshold,
)
if ctx.Err() != nil {
return false, nil
}

// Search for new peers in the network.
@@ -225,20 +210,14 @@ func (s *Service) FindPeersWithSubnet(
wg.Wait()
}

peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)
missingPeerCountForTopic := peersSummary(topic, threshold)

// If we have enough peers, we can exit the loop. This is the happy path.
if missingPeerCountForTopic == 0 {
break
}

if time.Since(lastLogTime) > minLogInterval {
lastLogTime = time.Now()
log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - continue")
}
}

log.WithField("currentPeerCount", threshold).Debug("Searching for new peers for a subnet - success")
return true, nil
}

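The refactor above narrows `peersSummary` to return only the missing-peer count, since the running peer count is no longer logged on every iteration. The arithmetic is simple; a standalone sketch of the same computation (function and variable names are illustrative):

```go
package main

import "fmt"

// missingPeers mirrors the peersSummary closure above: given how many peers are
// currently subscribed to a topic and the desired threshold, it reports how many
// more peers are still needed (never negative).
func missingPeers(currentPeerCount, threshold int) int {
	return max(0, threshold-currentPeerCount)
}

func main() {
	fmt.Println(missingPeers(3, 5)) // 2 more peers needed, so the search loop keeps going
	fmt.Println(missingPeers(7, 5)) // 0: threshold already met, FindPeersWithSubnet exits early
}
```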
@@ -9,10 +9,13 @@ var (
ErrInvalidSequenceNum = errors.New("invalid sequence number provided")
ErrGeneric = errors.New("internal service error")

ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob epoch < minimum_request_epoch")
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob epoch < minimum_request_epoch")

ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
ErrMaxDataColumnReqExceeded = errors.New("requested more than MAX_REQUEST_DATA_COLUMN_SIDECARS")

ErrResourceUnavailable = errors.New("resource requested unavailable")
)

@@ -206,8 +206,8 @@ func (s BlobSidecarsByRootReq) Swap(i, j int) {
}

// Len is the number of elements in the collection.
func (s BlobSidecarsByRootReq) Len() int {
return len(s)
func (s *BlobSidecarsByRootReq) Len() int {
return len(*s)
}

// ====================================

@@ -221,7 +221,6 @@ func generateDataColumnIdentifiers(n int) []*eth.DataColumnsByRootIdentifier {

func TestDataColumnSidecarsByRootReq_Marshal(t *testing.T) {
/*

SSZ encoding of DataColumnsByRootIdentifiers is tested in spectests.
However, encoding a list of DataColumnsByRootIdentifier is not.
We are testing it here.

@@ -201,6 +201,11 @@ func ConvertPeerIDToNodeID(pid peer.ID) (enode.ID, error) {
return [32]byte{}, errors.Wrap(err, "parse public key")
}

newPubkey := &ecdsa.PublicKey{Curve: gCrypto.S256(), X: pubKeyObjSecp256k1.X(), Y: pubKeyObjSecp256k1.Y()}
newPubkey := &ecdsa.PublicKey{
Curve: gCrypto.S256(),
X: pubKeyObjSecp256k1.X(),
Y: pubKeyObjSecp256k1.Y(),
}

return enode.PubkeyToIDV4(newPubkey), nil
}

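The hunk above only reflows the `ecdsa.PublicKey` literal; `ConvertPeerIDToNodeID` still extracts the secp256k1 key embedded in a libp2p peer ID and derives a discv5 node ID from it. A hedged usage sketch; the `p2p` import path is assumed from the repository layout, and the throwaway key stands in for a real peer identity:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p" // assumed package path for ConvertPeerIDToNodeID
	libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	// Generate a throwaway secp256k1 identity, the key type consensus-layer peers use.
	priv, _, err := libp2pcrypto.GenerateSecp256k1Key(rand.Reader)
	if err != nil {
		panic(err)
	}
	pid, err := peer.IDFromPrivateKey(priv)
	if err != nil {
		panic(err)
	}

	// Derive the discv5 node ID from the public key embedded in the peer ID.
	nodeID, err := p2p.ConvertPeerIDToNodeID(pid)
	if err != nil {
		panic(err)
	}
	fmt.Println(nodeID)
}
```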
@@ -245,7 +245,7 @@ func (s *Service) validatorEndpoints(
template: "/eth/v2/validator/aggregate_attestation",
name: namespace + ".GetAggregateAttestationV2",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
},
handler: server.GetAggregateAttestationV2,
methods: []string{http.MethodGet},
@@ -314,7 +314,7 @@ func (s *Service) validatorEndpoints(
template: "/eth/v1/validator/attestation_data",
name: namespace + ".GetAttestationData",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
},
handler: server.GetAttestationData,
methods: []string{http.MethodGet},

@@ -1265,6 +1265,15 @@ func (s *Server) GetCommittees(w http.ResponseWriter, r *http.Request) {
httputil.HandleError(w, "Could not get epoch end slot: "+err.Error(), http.StatusInternalServerError)
return
}

// Validate that the provided slot belongs to the specified epoch, as required by the Beacon API spec.
if rawSlot != "" {
if sl < uint64(startSlot) || sl > uint64(endSlot) {
httputil.HandleError(w, fmt.Sprintf("Slot %d does not belong in epoch %d", sl, epoch), http.StatusBadRequest)
return
}
}

committeesPerSlot := corehelpers.SlotCommitteeCount(activeCount)
committees := make([]*structs.Committee, 0)
for slot := startSlot; slot <= endSlot; slot++ {

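The new check rejects a `slot` query parameter that falls outside the requested epoch. The underlying arithmetic: an epoch covers slots `epoch*SLOTS_PER_EPOCH` through `(epoch+1)*SLOTS_PER_EPOCH - 1`, so with 32 slots per epoch (the mainnet value) epoch 10 covers slots 320 through 351, which is why slots 300 and 400 are rejected in the test hunk that follows. A minimal sketch of the same bounds check (helper name and constant wiring are illustrative):

```go
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value of SLOTS_PER_EPOCH

// slotInEpoch mirrors the bounds check added to GetCommittees: a slot belongs to
// an epoch iff it lies between the epoch's start slot and end slot, inclusive.
func slotInEpoch(slot, epoch uint64) bool {
	startSlot := epoch * slotsPerEpoch
	endSlot := startSlot + slotsPerEpoch - 1
	return slot >= startSlot && slot <= endSlot
}

func main() {
	fmt.Println(slotInEpoch(330, 10)) // true: epoch 10 covers slots 320..351
	fmt.Println(slotInEpoch(300, 10)) // false: rejected with "Slot 300 does not belong in epoch 10"
	fmt.Println(slotInEpoch(400, 10)) // false: rejected with "Slot 400 does not belong in epoch 10"
}
```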
@@ -4078,6 +4078,47 @@ func TestGetCommittees(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, true, resp.Finalized)
})

t.Run("Invalid slot for given epoch", func(t *testing.T) {
cases := []struct {
name string
epoch string
slot string
expectMsg string
}{
{
name: "Slot after the specified epoch",
epoch: "10",
slot: "400",
expectMsg: "Slot 400 does not belong in epoch 10",
},
{
name: "Slot before the specified epoch",
epoch: "10",
slot: "300",
expectMsg: "Slot 300 does not belong in epoch 10",
},
}

for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
query := url + "?epoch=" + tc.epoch + "&slot=" + tc.slot
request := httptest.NewRequest(http.MethodGet, query, nil)
request.SetPathValue("state_id", "head")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetCommittees(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
var resp struct {
Message string `json:"message"`
Code int `json:"code"`
}
err := json.Unmarshal(writer.Body.Bytes(), &resp)
assert.NoError(t, err)
assert.Equal(t, tc.expectMsg, resp.Message)
})
}
})
}

func TestGetBlockHeaders(t *testing.T) {

@@ -97,18 +97,19 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
}

// parseIndices filters out invalid and duplicate blob indices
func parseIndices(url *url.URL, s primitives.Slot) ([]uint64, error) {
func parseIndices(url *url.URL, s primitives.Slot) ([]int, error) {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(s)
rawIndices := url.Query()["indices"]
indices := make([]uint64, 0, params.BeaconConfig().MaxBlobsPerBlock(s))
indices := make([]int, 0, maxBlobsPerBlock)
invalidIndices := make([]string, 0)
loop:
for _, raw := range rawIndices {
ix, err := strconv.ParseUint(raw, 10, 64)
ix, err := strconv.Atoi(raw)
if err != nil {
invalidIndices = append(invalidIndices, raw)
continue
}
if ix >= uint64(params.BeaconConfig().MaxBlobsPerBlock(s)) {
if !(0 <= ix && ix < maxBlobsPerBlock) {
invalidIndices = append(invalidIndices, raw)
continue
}

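The rewritten `parseIndices` switches to plain `int` indices and caches `MaxBlobsPerBlock` once; indices that fail to parse or fall outside `[0, maxBlobsPerBlock)` are collected as invalid, and the `loop:` label suggests duplicate handling continues past the cutoff of this hunk. A rough illustration of the filtering rule under those assumptions (a standalone helper, not the Prysm function itself; duplicate handling is omitted):

```go
package main

import (
	"fmt"
	"strconv"
)

// filterIndices is an illustrative stand-in for the validation step shown above:
// it keeps indices that parse as integers and lie in [0, maxBlobsPerBlock),
// and reports everything else as invalid.
func filterIndices(rawIndices []string, maxBlobsPerBlock int) (valid []int, invalid []string) {
	for _, raw := range rawIndices {
		ix, err := strconv.Atoi(raw)
		if err != nil || ix < 0 || ix >= maxBlobsPerBlock {
			invalid = append(invalid, raw)
			continue
		}
		valid = append(valid, ix)
	}
	return valid, invalid
}

func main() {
	valid, invalid := filterIndices([]string{"0", "2", "9", "-1", "abc"}, 6)
	fmt.Println(valid)   // [0 2]
	fmt.Println(invalid) // [9 -1 abc]
}
```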